hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1820e413024564a4aab74a55fd9bc9b780e0d8f1 | 3,035 | py | Python | src/others/JVMDescriptorToJSON.py | RRua/AnaDroid | 7417b117a50149a6f210cd334de71b814db8d6c7 | [
"MIT"
] | 7 | 2019-01-17T18:37:59.000Z | 2020-11-16T13:42:29.000Z | src/others/JVMDescriptorToJSON.py | RRua/AnaDroid | 7417b117a50149a6f210cd334de71b814db8d6c7 | [
"MIT"
] | null | null | null | src/others/JVMDescriptorToJSON.py | RRua/AnaDroid | 7417b117a50149a6f210cd334de71b814db8d6c7 | [
"MIT"
] | null | null | null |
import re,sys, json
# Mapping from single-character JVM primitive type descriptors to readable
# Java type names ("V" is the void pseudo-type).
knownRetTypes = {
    "V" : "Void" ,
    "Z" : "boolean",
    "B" : "byte",
    "S" : "short",
    "C" : "char",
    "I" : "int",
    "J" : "long",
    "F" :"float",
    "D" : "double"
}
if __name__== "__main__":
    # CLI entry point: expects the descriptors file path as the only argument.
    if len(sys.argv) > 1:
        print("parsing descriptors of file " + sys.argv[1] )
        # NOTE(review): parseDescriptorsFile is not defined in this (filtered)
        # copy of the file — running it as-is would raise NameError; confirm.
        parseDescriptorsFile(sys.argv[1])
    else:
        print ("arg required ( filename )")
| 24.674797 | 107 | 0.611203 |
import re,sys, json
# Mapping from single-character JVM primitive type descriptors to readable
# Java type names ("V" is the void pseudo-type).
knownRetTypes = {
    "V" : "Void" ,
    "Z" : "boolean",
    "B" : "byte",
    "S" : "short",
    "C" : "char",
    "I" : "int",
    "J" : "long",
    "F" :"float",
    "D" : "double"
}
def inferType(st):
    """Translate a single JVM type descriptor into a readable type name.

    Returns "" for an empty or unrecognised descriptor.
    """
    if not st:
        return ""
    if "[" in st:
        # array descriptor, e.g. "[I" -> "[int]"
        return "[" + inferType(st[1:]) + "]"
    if len(st) > 1:
        # multi-character descriptor: treat as an object type, e.g. "Ljava/lang/String"
        return parseMethod(st)
    if st[0] in knownRetTypes:
        # single-character primitive descriptor
        return knownRetTypes[st]
    return ""
def parseMethod( full):
    """Convert an object descriptor such as 'Ljava/lang/String;' into a
    dotted name like 'java.lang.String' (underscores are stripped too)."""
    withoutPrefix = re.sub(r'^L', '', full)
    return withoutPrefix.replace("/", ".").replace(";", "").replace("_", "")
def getArgList(argList):
    """Translate the raw type descriptors in `argList` (the argument part of a
    JVM method descriptor, e.g. "I[JLjava/lang/String;") into a list of
    readable type names, e.g. ["int", "[long]", "java.lang.String"].

    Fixes defects of the original loop:
      * after an object type ('L...;') the index now skips past the ';'
        terminator (the original left `i` on the ';' and looped forever);
      * after an array type the index advances by the number of raw descriptor
        characters consumed, not by the length of the rendered type name, so
        the remaining arguments are still parsed;
      * any unrecognised character is skipped so the loop always progresses.
    """
    l = []
    i = 0
    while i < len(argList):
        char = argList[i]
        if char == '[':
            # array: render the component type that follows, e.g. "[int]"
            inner = getArgList(argList[(i + 1):])
            innerType = str(inner[0]) if inner else ""
            l.append("[" + innerType + "]")
            i = i + 1 + _rawDescriptorLength(argList, i + 1)
        elif char == 'L':
            # object type: consume up to the ';' terminator
            i2 = argList.find(";", i, len(argList))
            if i2 == -1:
                # malformed (unterminated) object descriptor: consume the rest
                l.append(parseMethod(argList[i:]))
                break
            l.append(parseMethod(argList[i:i2]))
            i = i2 + 1
        elif char in knownRetTypes:
            l.append(knownRetTypes[str(char)])
            i = i + 1
        else:
            # unknown character (e.g. '('): skip it to guarantee progress
            i = i + 1
    return l


def _rawDescriptorLength(argList, i):
    """Return the number of raw characters occupied by the single type
    descriptor starting at index `i` (helper for array advancement)."""
    if i >= len(argList):
        return 0
    c = argList[i]
    if c == '[':
        return 1 + _rawDescriptorLength(argList, i + 1)
    if c == 'L':
        end = argList.find(";", i)
        return (end - i + 1) if end != -1 else len(argList) - i
    return 1
def buildMethodObjFromLine(matcherObj):
    """Build a method-trace dict from a regex match over one trace line.

    Group layout (0-based into groups()): 0=thread id, 2=enter/exit marker,
    4=timestamp, 6=qualified method name, 8=raw argument descriptors,
    9=raw return descriptor, 11=source file.
    """
    g = matcherObj.groups()
    return {
        'threadID': int(g[0]),
        'inout': g[2],
        'time': int(g[4]),
        # strip any leading dots from the qualified method name
        'method': re.sub(r'^(\.)+', '', g[6]),
        'args': getArgList(g[8]),
        'return': getArgList(g[9])[0],
        'file': g[11],
    }
def loadprocesstracesRegex(fileName):
    """Parse a method-trace file line by line with a regex and build a list
    of method dicts via buildMethodObjFromLine.

    Returns the list of parsed methods — the original built the list but
    silently discarded it. Console output (the running counter printed per
    parsed line) is preserved.
    """
    with open(fileName) as f:
        all_traces = f.read().splitlines()
    i = 0
    methods = []
    # Compile once instead of per line (free hoist).
    # NOTE(review): the original carried the comment "tem erro" ("has an
    # error" in Portuguese) here, hinting this regex may not cover every
    # trace-line variant — confirm against real trace files.
    pattern = re.compile(
        r"^([0-9]+)*(\s)+(xit|ent)(\s)+([0-9]+)+(\s|\-)([\w+.$]+)(\s)+\((.*?)\)(.*)(\s)+(.*)")
    for trace in all_traces:
        x = pattern.search(trace)
        # well formed trace line
        if x and len(x.groups()) == 12:
            method = buildMethodObjFromLine(x)
            methods.append(method)
            print(i)
            i = i + 1
    return methods
def dummySeparator(traceLine):
    """Split one whitespace-separated descriptor line into a method-info
    dict with keys: name, class, args, return, file.

    Expected token layout: token 0 is the dot-qualified method name, token 1
    is the JVM descriptor "(args)return", the last token is the source file.
    """
    tokens = traceLine.replace("\t", " ").split(" ")
    qualified = re.sub(r'^(\.)+', '', tokens[0])
    descriptor = tokens[1]
    method = {}
    # last dotted component is the bare method name ...
    method['name'] = qualified.split(".")[-1]
    # ... and the class is everything with that trailing ".name" removed
    method['class'] = qualified.replace("." + method['name'], "")
    method['args'] = getArgList(descriptor.split(")")[0])
    method['return'] = getArgList(descriptor.split(")")[1])[0]
    method['file'] = tokens[-1]
    return method
def parseDescriptorsFile(filename):
    """Parse a descriptors .txt file (one method descriptor per line), group
    the parsed methods by class name, and dump the grouping as JSON next to
    the input file (same name, .json extension).
    """
    methods_dict = {}
    with open(filename) as f:
        all_descriptors = f.read().splitlines()
    for line in all_descriptors:
        jo = dummySeparator(line)
        # setdefault replaces the original duplicated if/else append branches
        methods_dict.setdefault(jo['class'], []).append(jo)
    with open(filename.replace(".txt", ".json"), "w") as outfile:
        json.dump(methods_dict, outfile, indent=2)
if __name__== "__main__":
    # CLI entry point: first argument is the descriptors .txt file to parse.
    if len(sys.argv) > 1:
        print("parsing descriptors of file " + sys.argv[1] )
        parseDescriptorsFile(sys.argv[1])
    else:
        print ("arg required ( filename )")
| 2,495 | 0 | 161 |
b9747b0736b980260aab984b289f61d68e837926 | 3,878 | py | Python | tests/render2/test_logging.py | ace-ecosystem/ACE | d17b5ef4bccf923ec6be5115fabe40f0627dab2d | [
"Apache-2.0"
] | 24 | 2019-09-21T21:09:45.000Z | 2022-03-15T19:48:13.000Z | tests/render2/test_logging.py | ace-ecosystem/ACE | d17b5ef4bccf923ec6be5115fabe40f0627dab2d | [
"Apache-2.0"
] | 54 | 2019-09-16T20:06:30.000Z | 2021-08-18T22:22:08.000Z | tests/render2/test_logging.py | ace-ecosystem/ACE | d17b5ef4bccf923ec6be5115fabe40f0627dab2d | [
"Apache-2.0"
] | 9 | 2019-09-08T13:35:55.000Z | 2021-01-03T15:23:37.000Z | import pytest
from render2.src.shared.shared_logging import get_logger, truncate, prep_for_logging, TRUNCATE_TEXT, TRUNCATE_LENGTH
# Long fixture string used to exercise the truncation paths in the tests.
LONG_STRING = "zxcvbnmasdfghjklqwertyuiop1234567890zxcvbnmasdfghjklqwertyu" \
              "iop1234567890zxcvbnmasdzxcvkjapeorijfaldkcfjadfjapsoeifjadf"
# LONG_STRING cut to 64 chars total, ending with the truncation marker.
TRUNCATED_STRING = f"{LONG_STRING[:(64 - TRUNCATE_LENGTH)]}{TRUNCATE_TEXT}"
# --------------------------------------------------------------
# Tests
# --------------------------------------------------------------
@pytest.mark.unit
def test_prep_for_logging_truncate_long_string_in_content():
    """Make sure data longer than max length gets truncated.

    As a by-product, this also tests that 'None' is properly handled
    (passed through without truncation).
    """
    # Setup: expected content is the prefix plus the truncation marker
    max_length = 32
    truncated_string = f"{LONG_STRING[:(max_length - TRUNCATE_LENGTH)]}{TRUNCATE_TEXT}"
    job = {'data': None, 'content_type': 'html', 'content': LONG_STRING}
    expected = {'data': None, 'content_type': 'html', 'content': truncated_string}
    # Execute
    _job_for_logging = prep_for_logging(job, max_length=max_length)
    # Verify: whole dict matches and content is exactly max_length chars
    assert expected == _job_for_logging
    assert len(_job_for_logging['content']) == max_length
@pytest.mark.unit
def test_prep_for_logging_truncate_long_string_in_data():
    """Truncate a too-long string held in the 'data' field."""
    # Setup
    max_length = 32
    truncated_string = f"{LONG_STRING[:(max_length - TRUNCATE_LENGTH)]}{TRUNCATE_TEXT}"
    job = {'data': LONG_STRING, 'content_type': 'html', 'content': 'this_is_short'}
    expected = {'data': truncated_string, 'content_type': 'html', 'content': 'this_is_short'}
    # Execute
    _job_for_logging = prep_for_logging(job, max_length=max_length)
    # Verify: only 'data' was truncated, to exactly max_length chars
    assert expected == _job_for_logging
    assert len(_job_for_logging['data']) == max_length
@pytest.mark.unit
def test_prep_for_logging_truncate_long_bytes_string_in_data():
    """Truncate a too-long bytes value in 'data'.

    The expected value is a str, so the bytes input is decoded as part of
    being prepared for logging.
    """
    # Setup
    max_length = 32
    truncated_string = f"{LONG_STRING[:(max_length - TRUNCATE_LENGTH)]}{TRUNCATE_TEXT}"
    job = {'data': LONG_STRING.encode('utf-8'), 'content_type': 'html', 'content': 'this_is_short'}
    expected = {'data': truncated_string, 'content_type': 'html', 'content': 'this_is_short'}
    # Execute
    _job_for_logging = prep_for_logging(job, max_length=max_length)
    # Verify
    assert expected == _job_for_logging
    assert len(_job_for_logging['data']) == max_length
@pytest.mark.unit
def test_prep_for_logging_no_fields_truncated():
    """Test no fields are altered if they are all equal or less than
    the max length."""
    # Setup: every field is max_length (13) chars or shorter
    max_length = 13
    job = {'data': 'this_is_short', 'content_type': 'html', 'content': 'this_is_short'}
    expected = job.copy()
    # Execute and verify: output equals the untouched snapshot
    assert expected == prep_for_logging(job, max_length=max_length)
@pytest.mark.unit
def test_prep_for_logging_return_only_truncated_text_due_to_small_max_length():
    """Make sure both data can be redacted and html can be truncated.

    With max_length smaller than the truncation marker itself, only the
    marker remains in 'content'.
    """
    # Setup
    max_length = 5
    job = {'data': None, 'content_type': 'html', 'content': LONG_STRING}
    expected = {'data': None, 'content_type': 'html', 'content': TRUNCATE_TEXT}
    # Execute
    _job_for_logging = prep_for_logging(job, max_length=max_length)
    # Verify
    assert expected == _job_for_logging
    assert TRUNCATE_LENGTH == len(_job_for_logging['content'])
@pytest.mark.unit
def test_record_truncation(caplog):
"""Ensure that the total LogRecord message is not over maximum size"""
# Setup
too_long = u"\U0001F926" * 65000
too_long_bytes = len(too_long.encode('utf-8'))
logger = get_logger("test")
# Execute
logger.info(f"{too_long}")
msg = caplog.messages[-1]
truncated_bytes = len(msg.encode('utf-8'))
# Verify
assert truncated_bytes < too_long_bytes
assert truncated_bytes < 265000 | 33.145299 | 116 | 0.689273 | import pytest
from render2.src.shared.shared_logging import get_logger, truncate, prep_for_logging, TRUNCATE_TEXT, TRUNCATE_LENGTH
# Long fixture string used to exercise the truncation paths in the tests.
LONG_STRING = "zxcvbnmasdfghjklqwertyuiop1234567890zxcvbnmasdfghjklqwertyu" \
              "iop1234567890zxcvbnmasdzxcvkjapeorijfaldkcfjadfjapsoeifjadf"
# LONG_STRING cut to 64 chars total, ending with the truncation marker.
TRUNCATED_STRING = f"{LONG_STRING[:(64 - TRUNCATE_LENGTH)]}{TRUNCATE_TEXT}"
# --------------------------------------------------------------
# Tests
# --------------------------------------------------------------
@pytest.mark.unit
def test_prep_for_logging_truncate_long_string_in_content():
    """Data longer than max length is truncated in 'content'; a None
    'data' field passes through untouched."""
    limit = 32
    expected_content = LONG_STRING[:limit - TRUNCATE_LENGTH] + TRUNCATE_TEXT
    job = {'data': None, 'content_type': 'html', 'content': LONG_STRING}
    prepared = prep_for_logging(job, max_length=limit)
    assert prepared == {'data': None, 'content_type': 'html',
                        'content': expected_content}
    assert len(prepared['content']) == limit
@pytest.mark.unit
def test_prep_for_logging_truncate_long_string_in_data():
    """A too-long string held in 'data' is truncated to max length."""
    limit = 32
    expected_data = LONG_STRING[:limit - TRUNCATE_LENGTH] + TRUNCATE_TEXT
    job = {'data': LONG_STRING, 'content_type': 'html', 'content': 'this_is_short'}
    prepared = prep_for_logging(job, max_length=limit)
    assert prepared == {'data': expected_data, 'content_type': 'html',
                        'content': 'this_is_short'}
    assert len(prepared['data']) == limit
@pytest.mark.unit
def test_prep_for_logging_truncate_long_bytes_string_in_data():
    """A too-long bytes value in 'data' is truncated (and returned as str)."""
    limit = 32
    expected_data = LONG_STRING[:limit - TRUNCATE_LENGTH] + TRUNCATE_TEXT
    job = {'data': LONG_STRING.encode('utf-8'), 'content_type': 'html',
           'content': 'this_is_short'}
    prepared = prep_for_logging(job, max_length=limit)
    assert prepared == {'data': expected_data, 'content_type': 'html',
                        'content': 'this_is_short'}
    assert len(prepared['data']) == limit
@pytest.mark.unit
def test_prep_for_logging_no_fields_truncated():
    """Fields whose length is at or below the max length are returned
    unchanged."""
    short = 'this_is_short'
    job = {'data': short, 'content_type': 'html', 'content': short}
    snapshot = job.copy()
    # max_length equals the field length, so nothing should be touched
    assert snapshot == prep_for_logging(job, max_length=13)
@pytest.mark.unit
def test_prep_for_logging_return_only_truncated_text_due_to_small_max_length():
    """A max length smaller than the truncation marker leaves only the
    marker itself in 'content'."""
    job = {'data': None, 'content_type': 'html', 'content': LONG_STRING}
    prepared = prep_for_logging(job, max_length=5)
    assert prepared == {'data': None, 'content_type': 'html',
                        'content': TRUNCATE_TEXT}
    assert len(prepared['content']) == TRUNCATE_LENGTH
@pytest.mark.unit
def test_record_truncation(caplog):
"""Ensure that the total LogRecord message is not over maximum size"""
# Setup
too_long = u"\U0001F926" * 65000
too_long_bytes = len(too_long.encode('utf-8'))
logger = get_logger("test")
# Execute
logger.info(f"{too_long}")
msg = caplog.messages[-1]
truncated_bytes = len(msg.encode('utf-8'))
# Verify
assert truncated_bytes < too_long_bytes
assert truncated_bytes < 265000 | 0 | 0 | 0 |
83b6e2c42dd3d476585d45a2415bd9a8cdd1ad0d | 558 | py | Python | src/auditlog/serializers.py | softcodesInt/softcode-admin-api | 697c1c6c3c9a3dc524a3e7c2271071e7c9c1f03f | [
"MIT"
] | null | null | null | src/auditlog/serializers.py | softcodesInt/softcode-admin-api | 697c1c6c3c9a3dc524a3e7c2271071e7c9c1f03f | [
"MIT"
] | null | null | null | src/auditlog/serializers.py | softcodesInt/softcode-admin-api | 697c1c6c3c9a3dc524a3e7c2271071e7c9c1f03f | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import StaffLog, CompanyLog
from accounts.serializers import UserSerializer
from company.serializers import CompanySerializer
| 23.25 | 56 | 0.738351 | from rest_framework import serializers
from .models import StaffLog, CompanyLog
from accounts.serializers import UserSerializer
from company.serializers import CompanySerializer
class StaffLogSerializer(serializers.ModelSerializer):
    """Serialize StaffLog entries, expanding the `blamer` and `staff`
    users with the full nested UserSerializer representation."""
    blamer = UserSerializer()
    staff = UserSerializer()

    class Meta:
        model = StaffLog
        fields = '__all__'
class CompanyLogSerializer(serializers.ModelSerializer):
    """Serialize CompanyLog entries, expanding the `blamer` user and the
    `company` with their full nested serializer representations."""
    blamer = UserSerializer()
    company = CompanySerializer()

    class Meta:
        model = CompanyLog
        fields = '__all__'
| 0 | 331 | 46 |
1360b1509138f330e4ce58ff72522720d598c5e7 | 546 | py | Python | atmcorr/reflectance/worldview.py | DHI-GRAS/atmcorr | 55e584c7971009065b47ece9d3d215bfe8335d04 | [
"MIT"
] | 5 | 2019-09-03T17:13:57.000Z | 2021-12-01T03:22:11.000Z | atmcorr/reflectance/worldview.py | DHI-GRAS/atmcorr | 55e584c7971009065b47ece9d3d215bfe8335d04 | [
"MIT"
] | 1 | 2021-04-28T08:11:37.000Z | 2021-04-28T09:52:02.000Z | atmcorr/reflectance/worldview.py | DHI-GRAS/atmcorr | 55e584c7971009065b47ece9d3d215bfe8335d04 | [
"MIT"
] | 1 | 2021-03-31T02:13:08.000Z | 2021-03-31T02:13:08.000Z | from dg_calibration import reflectance
def toa_reflectance(radata, mtdFile, band_ids):
    """Estimate toa reflectance from radiometric data
    ignoring atmospheric, topographic and BRDF effects

    Parameters
    ----------
    radata : ndarray shape (nbands, ny, nx)
        radiance data
    mtdFile : str
        path to IMD metadata file
    band_ids : sequence of int
        band IDs

    Returns
    -------
    ndarray
        reflectance
    """
    # Thin wrapper: delegate entirely to dg_calibration's conversion.
    return reflectance.radiance_to_reflectance(radata, mtdFile, band_ids=band_ids)
| 23.73913 | 82 | 0.663004 | from dg_calibration import reflectance
def toa_reflectance(radata, mtdFile, band_ids):
    """Compute top-of-atmosphere reflectance from radiance data.

    Atmospheric, topographic and BRDF effects are ignored.

    Parameters
    ----------
    radata : ndarray shape (nbands, ny, nx)
        radiance data
    mtdFile : str
        path to IMD metadata file
    band_ids : sequence of int
        band IDs

    Returns
    -------
    ndarray
        reflectance
    """
    result = reflectance.radiance_to_reflectance(radata, mtdFile, band_ids=band_ids)
    return result
| 0 | 0 | 0 |
7f68299122102c1ee4f94f3c756c5f55a44708b9 | 5,897 | py | Python | urbansprawl/population/data_extract.py | Oslandia/urbansprawl | afbc1da6ce640569571d26900a2cc97a063fb0a9 | [
"MIT"
] | 7 | 2019-01-07T14:41:48.000Z | 2020-07-01T06:50:17.000Z | urbansprawl/population/data_extract.py | Oslandia/urbansprawl | afbc1da6ce640569571d26900a2cc97a063fb0a9 | [
"MIT"
] | 6 | 2019-01-08T10:16:36.000Z | 2019-03-01T18:33:14.000Z | urbansprawl/population/data_extract.py | Oslandia/urbansprawl | afbc1da6ce640569571d26900a2cc97a063fb0a9 | [
"MIT"
] | 1 | 2019-01-21T08:51:49.000Z | 2019-01-21T08:51:49.000Z | ###############
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###############
from shapely.geometry import GeometryCollection
import geopandas as gpd
import pandas as pd
import os
import numpy as np
import osmnx as ox
from osmnx import log
from .utils import get_population_extract_filename
# Valid values for the `data_source` argument used across this module.
DATA_SOURCES = ["insee", "gpw"]
##############################
# I/O for population data
##############################
def get_df_extract(df_data, poly_gdf, operation="within"):
    """
    Indexes input geo-data frame within an input region of interest
    If the region of interest is given as a polygon, its bounding box is indexed

    Parameters
    ----------
    df_data : geopandas.GeoDataFrame
        input data frame to index
    poly_gdf : geopandas.GeoDataFrame
        geodataframe containing the region of interest in form of polygon
    operation : string
        the desired spatial join operation: 'within' or 'intersects'

    Returns
    ----------
    geopandas.GeoDataFrame
        returns the population data frame indexed within the region of interest
    """
    # Project to same system coordinates so the spatial join compares
    # geometries in the same CRS
    poly_gdf = ox.project_gdf(poly_gdf, to_crs=df_data.crs)
    # Spatial join: keep rows of df_data satisfying `operation` vs the region
    df_extract = gpd.sjoin(df_data, poly_gdf, op=operation)
    # Keep original columns (drops the index columns added by sjoin)
    df_extract = df_extract[df_data.columns]
    return df_extract
def get_population_df(
    pop_shapefile, pop_data_file, data_source, to_crs, poly_gdf
):
    """
    Read the population shapefile from input filename/s
    Index the data within the bounding box
    Project to desired CRS

    Parameters
    ----------
    pop_shapefile : string
        population count shapefile
    pop_data_file : string
        population data additional file (required for INSEE format)
    data_source : string
        desired population data source
    to_crs : dict
        desired coordinate reference system
    poly_gdf : geopandas.GeoDataFrame
        geodataframe containing the region of interest in form of polygon

    Returns
    ----------
    geopandas.GeoDataFrame
        returns the indexed and projected population data frame
    """
    #######################################
    # Load GPW/INSEE population data
    #######################################
    # Read population data
    df_pop = gpd.read_file(pop_shapefile)
    # Extract region of interest (EPSG 4326)
    # Filter geometries not contained in bounding box
    df_pop = get_df_extract(df_pop, poly_gdf)
    # NOTE(review): `is "insee"` compares identity against a string literal,
    # which is not a reliable string comparison — should be `== "insee"`.
    if data_source is "insee":
        #######################################
        # Additional step for INSEE data
        #######################################
        # Read dbf files
        data_pop = gpd.read_file(pop_data_file)
        # Get columns of interest
        data_pop = data_pop[["idINSPIRE", "ind_c"]]
        df_pop = df_pop[["geometry", "idINSPIRE"]]
        # Inner join to obtain population count data associated to each geometry
        df_pop = pd.merge(df_pop, data_pop, how="inner", on="idINSPIRE")
    # Rename population count column (INSEE: ind_c, GPW: DN)
    df_pop.rename(
        columns={"ind_c": "pop_count", "DN": "pop_count"}, inplace=True
    )
    return ox.project_gdf(df_pop, to_crs=to_crs)
def get_extract_population_data(
    city_ref,
    data_source,
    pop_shapefile=None,
    pop_data_file=None,
    to_crs={"init": "epsg:4326"},
    polygons_gdf=None,
):
    """Get data population extract of desired data source for input city,
    calculating the convex hull of input buildings geodataframe
    The population data frame is projected to the desired coordinate reference
    system
    Stores the extracted shapefile
    Returns the stored population data for input 'data source' and 'city
    reference' if it was previously stored

    Parameters
    ----------
    city_ref : string
        name of input city
    data_source : string
        desired population data source
    pop_shapefile : string
        path of population count shapefile
    pop_data_file : string
        path of population data additional file (required for INSEE format)
    to_crs : dict
        desired coordinate reference system
    polygons_gdf : geopandas.GeoDataFrame
        polygons (e.g. buildings) for input region of interest which
        will determine the shape to extract

    Returns
    ----------
    geopandas.GeoDataFrame
        returns the extracted population data
    """
    # Input data source type given?
    assert data_source in DATA_SOURCES
    # Population extract exists?
    if os.path.exists(get_population_extract_filename(city_ref, data_source)):
        log("Population extract exists for input city: " + city_ref)
        return gpd.read_file(
            get_population_extract_filename(city_ref, data_source)
        )
    # Input shape given?
    # NOTE(review): `np.all(polygons_gdf is None)` reduces a single bool;
    # this is just a convoluted `assert polygons_gdf is not None`.
    assert not (np.all(polygons_gdf is None))
    # Input population shapefile given?
    assert pop_shapefile is not None
    # All input files given? (INSEE additionally needs the data file)
    assert not ((data_source == "insee") and (pop_data_file is None))
    # Get buildings convex hull
    polygon = GeometryCollection(
        polygons_gdf.geometry.values.tolist()
    ).convex_hull
    # Convert to geo-dataframe with defined CRS
    poly_gdf = gpd.GeoDataFrame(
        [polygon], columns=["geometry"], crs=polygons_gdf.crs
    )
    # Compute extract
    df_pop = get_population_df(
        pop_shapefile, pop_data_file, data_source, to_crs, poly_gdf
    )
    # Save to shapefile
    df_pop.to_file(
        get_population_extract_filename(city_ref, data_source),
        driver="ESRI Shapefile",
    )
    return df_pop
| 31.875676 | 87 | 0.624216 | ###############
# Repository: https://github.com/lgervasoni/urbansprawl
# MIT License
###############
from shapely.geometry import GeometryCollection
import geopandas as gpd
import pandas as pd
import os
import numpy as np
import osmnx as ox
from osmnx import log
from .utils import get_population_extract_filename
# Valid values for the `data_source` argument used across this module.
DATA_SOURCES = ["insee", "gpw"]
##############################
# I/O for population data
##############################
def get_df_extract(df_data, poly_gdf, operation="within"):
    """Clip `df_data` to the region of interest held in `poly_gdf`.

    The region polygon is first re-projected to the CRS of `df_data`, then a
    spatial join keeps only the rows whose geometry satisfies `operation`
    ('within' or 'intersects') against the region.

    Parameters
    ----------
    df_data : geopandas.GeoDataFrame
        input data frame to index
    poly_gdf : geopandas.GeoDataFrame
        geodataframe containing the region of interest in form of polygon
    operation : string
        the desired spatial join operation: 'within' or 'intersects'

    Returns
    ----------
    geopandas.GeoDataFrame
        rows of `df_data` inside the region, with the original columns only
    """
    region = ox.project_gdf(poly_gdf, to_crs=df_data.crs)
    joined = gpd.sjoin(df_data, region, op=operation)
    # sjoin adds index columns; restrict to the original ones
    return joined[df_data.columns]
def get_population_df(
    pop_shapefile, pop_data_file, data_source, to_crs, poly_gdf
):
    """
    Read the population shapefile from input filename/s
    Index the data within the bounding box
    Project to desired CRS

    Parameters
    ----------
    pop_shapefile : string
        population count shapefile
    pop_data_file : string
        population data additional file (required for INSEE format)
    data_source : string
        desired population data source ('insee' or 'gpw')
    to_crs : dict
        desired coordinate reference system
    poly_gdf : geopandas.GeoDataFrame
        geodataframe containing the region of interest in form of polygon

    Returns
    ----------
    geopandas.GeoDataFrame
        returns the indexed and projected population data frame
    """
    #######################################
    # Load GPW/INSEE population data
    #######################################
    # Read population data
    df_pop = gpd.read_file(pop_shapefile)
    # Extract region of interest (EPSG 4326): filter geometries not
    # contained in the region's bounding box
    df_pop = get_df_extract(df_pop, poly_gdf)
    # BUG FIX: the original used `is "insee"` (object identity against a
    # string literal), which is not a reliable string comparison.
    if data_source == "insee":
        #######################################
        # Additional step for INSEE data
        #######################################
        # INSEE counts live in a separate dbf keyed by idINSPIRE
        data_pop = gpd.read_file(pop_data_file)
        # Get columns of interest
        data_pop = data_pop[["idINSPIRE", "ind_c"]]
        df_pop = df_pop[["geometry", "idINSPIRE"]]
        # Inner join to obtain population count data associated to each geometry
        df_pop = pd.merge(df_pop, data_pop, how="inner", on="idINSPIRE")
    # Normalise the population count column name (INSEE: ind_c, GPW: DN)
    df_pop.rename(
        columns={"ind_c": "pop_count", "DN": "pop_count"}, inplace=True
    )
    return ox.project_gdf(df_pop, to_crs=to_crs)
def get_extract_population_data(
    city_ref,
    data_source,
    pop_shapefile=None,
    pop_data_file=None,
    to_crs={"init": "epsg:4326"},
    polygons_gdf=None,
):
    """Get data population extract of desired data source for input city,
    calculating the convex hull of input buildings geodataframe
    The population data frame is projected to the desired coordinate reference
    system
    Stores the extracted shapefile
    Returns the stored population data for input 'data source' and 'city
    reference' if it was previously stored

    Parameters
    ----------
    city_ref : string
        name of input city
    data_source : string
        desired population data source
    pop_shapefile : string
        path of population count shapefile
    pop_data_file : string
        path of population data additional file (required for INSEE format)
    to_crs : dict
        desired coordinate reference system (never mutated, so the mutable
        default is safe; kept for interface compatibility)
    polygons_gdf : geopandas.GeoDataFrame
        polygons (e.g. buildings) for input region of interest which
        will determine the shape to extract

    Returns
    ----------
    geopandas.GeoDataFrame
        returns the extracted population data
    """
    # Input data source type given?
    assert data_source in DATA_SOURCES
    # Hoisted: the extract filename was recomputed three times
    extract_filename = get_population_extract_filename(city_ref, data_source)
    # Population extract exists? Reuse the previously stored shapefile
    if os.path.exists(extract_filename):
        log("Population extract exists for input city: " + city_ref)
        return gpd.read_file(extract_filename)
    # Input shape given? (simplified from `not np.all(polygons_gdf is None)`,
    # which reduced a single bool through numpy for no benefit)
    assert polygons_gdf is not None
    # Input population shapefile given?
    assert pop_shapefile is not None
    # All input files given? (INSEE additionally requires the data file)
    assert not ((data_source == "insee") and (pop_data_file is None))
    # Get buildings convex hull
    polygon = GeometryCollection(
        polygons_gdf.geometry.values.tolist()
    ).convex_hull
    # Convert to geo-dataframe with defined CRS
    poly_gdf = gpd.GeoDataFrame(
        [polygon], columns=["geometry"], crs=polygons_gdf.crs
    )
    # Compute extract
    df_pop = get_population_df(
        pop_shapefile, pop_data_file, data_source, to_crs, poly_gdf
    )
    # Save to shapefile for future calls
    df_pop.to_file(extract_filename, driver="ESRI Shapefile")
    return df_pop
| 0 | 0 | 0 |
219f55a5acb6cf2f6e7f670cfe0a6f221de36ff3 | 3,591 | py | Python | lib/datasets/referit.py | BryanPlummer/phrase_detection | febe4d2e02a0467850cdf97fb3d3c3c5592be9a2 | [
"MIT"
] | 7 | 2019-11-15T13:16:55.000Z | 2021-11-10T18:19:58.000Z | lib/datasets/referit.py | BryanPlummer/phrase_detection | febe4d2e02a0467850cdf97fb3d3c3c5592be9a2 | [
"MIT"
] | 1 | 2021-09-07T13:28:49.000Z | 2021-09-07T13:28:49.000Z | lib/datasets/referit.py | BryanPlummer/phrase_detection | febe4d2e02a0467850cdf97fb3d3c3c5592be9a2 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Tensorflow Phrase Detection
# Licensed under The MIT License [see LICENSE for details]
# Written by Bryan Plummer based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
from model.config import cfg, get_output_vocab
import os.path as osp
import sys
import os
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle
import json
import uuid
import h5py
import string
| 32.645455 | 85 | 0.67335 | # --------------------------------------------------------
# Tensorflow Phrase Detection
# Licensed under The MIT License [see LICENSE for details]
# Written by Bryan Plummer based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
from model.config import cfg, get_output_vocab
import os.path as osp
import sys
import os
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle
import json
import uuid
import h5py
import string
class referit(imdb):
    """imdb subclass for the ReferIt (saiapr_tc-12) referring-expression
    dataset: every referred region becomes a generic '__phrase__' box."""

    def __init__(self, word_embedding_dict, image_set, data=None):
        imdb.__init__(self, 'referit_' + image_set, word_embedding_dict)
        # `data` is the external ReferIt API object (getRefIds/getImgIds/...)
        self._data = data
        # name, paths
        self._image_set = image_set
        self._data_path = osp.join(cfg.DATA_DIR, 'referit')
        self._classes = tuple(['__background__', '__phrase__'])
        self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self.set_proposal_method('gt')
        self.set_roidb_info()
        # NOTE(review): _load_image_set_index() is called a second time here —
        # presumably set_roidb_info() can change the index; confirm, else drop.
        self._image_index = self._load_image_set_index()

    def _load_image_set_index(self):
        """
        Load image ids for this split; also caches the unique image id list
        in self._im_ids. Returns indices into that list.
        """
        ref_ids = self._data.getRefIds(split=self._image_set)
        self._im_ids = list(set(self._data.getImgIds(ref_ids)))
        return range(len(self._im_ids))

    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        im_id = self._im_ids[self._image_index[i]]
        im_fn = self._data.loadImgs(im_id)[0]['file_name']
        return os.path.join(self._data_path, 'saiapr_tc-12', im_fn)

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if osp.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        image_to_ind = dict(list(zip(self._im_ids, list(range(self.num_images)))))
        gt_roidb = [self._load_referit_annotation(image_to_ind, index)
                    for index in self._image_index]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def _load_referit_annotation(self, image_to_ind, image_index):
        """
        Collect ground-truth phrase boxes for one image: every sentence of
        every reference to the image contributes the referred region's box,
        converted from [x, y, w, h] to [x1, y1, x2, y2].
        (`image_to_ind` is currently unused here.)
        """
        im_id = self._im_ids[self._image_index[image_index]]
        refs = self._data.imgToRefs[im_id]
        ref_ids = [ref['ref_id'] for ref in refs]
        gt_phrases = []
        gt_boxes = []
        for ref_id, ref in zip(ref_ids, refs):
            box = self._data.getRefBox(ref_id)
            # [x, y, w, h] -> [x1, y1, x2, y2]
            box = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
            for sent_annos in ref['sentences']:
                gt_phrases.append(sent_annos['raw'].encode('ascii','ignore').lower())
                gt_boxes.append(box)
        if len(gt_boxes) > 0:
            gt_boxes = np.vstack(gt_boxes)
        return {'phrases': gt_phrases,
                'boxes': gt_boxes,
                'flipped': False}
| 586 | 2,279 | 23 |
f1bb3279a0638f314a5a5053219e49caf2ab56fb | 1,319 | py | Python | utils/radiate_dataset.py | BerensRWU/Radiate-Complex-YOLO | f9d7ccfe585f4285b6fb195e8211072ef433879b | [
"MIT"
] | null | null | null | utils/radiate_dataset.py | BerensRWU/Radiate-Complex-YOLO | f9d7ccfe585f4285b6fb195e8211072ef433879b | [
"MIT"
] | null | null | null | utils/radiate_dataset.py | BerensRWU/Radiate-Complex-YOLO | f9d7ccfe585f4285b6fb195e8211072ef433879b | [
"MIT"
] | null | null | null | from __future__ import division
import glob, os
import numpy as np
import cv2
import torch.utils.data as torch_data
import yaml
import utils.radiate_utils as radiate_utils
from utils.calibration import Calibration
| 29.977273 | 69 | 0.679303 | from __future__ import division
import glob, os
import numpy as np
import cv2
import torch.utils.data as torch_data
import yaml
import utils.radiate_utils as radiate_utils
from utils.calibration import Calibration
class RadiateDataset(torch_data.Dataset):
    """Base torch Dataset for RADIATE data: loads radar images, LiDAR scans
    projected to image space, calibration, and per-frame labels.

    ``__len__`` and ``__getitem__`` are intentionally abstract and must be
    provided by subclasses.
    """

    def __init__(self, root_dir, split='train'):
        # `split` is accepted but currently unused by this base class.
        self.root_dir = root_dir

    def get_radar(self, sample_dir):
        """Load and return the cartesian radar image stored at `sample_dir`."""
        assert os.path.exists(sample_dir), sample_dir
        radar_cartesian = cv2.imread(sample_dir)
        return radar_cartesian

    def get_lidar(self, sample_dir, calib):
        """Read a LiDAR scan from `sample_dir` and project it into image
        space using `calib`."""
        assert os.path.exists(sample_dir), sample_dir
        lidar = radiate_utils.read_lidar(sample_dir)
        lidar = radiate_utils.lidar_to_image(lidar, calib)
        return lidar

    def get_calib(self):
        """Load the default calibration YAML and build the calibration
        matrices from it."""
        with open("config/default-calib.yaml", 'r') as file:
            calib = yaml.full_load(file)
        # generate calibration matrices from calib file
        calib = Calibration(calib)
        return calib

    def get_label(self, sample_annot):
        """Read ground-truth objects for a (scene, frame-index) pair."""
        scene = sample_annot[0]
        idx = sample_annot[1]
        objects = radiate_utils.read_label(self.root_dir, scene, idx)
        return objects

    def __len__(self):
        # BUG FIX: `raise NotImplemented` raises a TypeError at runtime
        # (NotImplemented is a constant, not an exception class); the
        # correct abstract-method exception is NotImplementedError.
        raise NotImplementedError

    def __getitem__(self, item):
        raise NotImplementedError
| 849 | 20 | 236 |
c44f44e958d4dc3376cab2ae19d9ac874ece080f | 518 | py | Python | examples/full/app/api/pilots/controllers.py | rbw0/flask-journey | 6181f59a7b5eef6a85b86ce6ed7d03c91f6bd285 | [
"MIT"
] | 14 | 2018-03-10T05:55:04.000Z | 2018-06-18T09:14:53.000Z | examples/full/app/api/pilots/controllers.py | rbw/flask-journey | 6181f59a7b5eef6a85b86ce6ed7d03c91f6bd285 | [
"MIT"
] | 6 | 2018-03-11T01:24:08.000Z | 2018-03-12T16:13:44.000Z | examples/full/app/api/pilots/controllers.py | rbw/flask-journey | 6181f59a7b5eef6a85b86ce6ed7d03c91f6bd285 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from flask import Blueprint
from flask_journey import route
from .services import get_pilots, get_pilot
from .schemas import pilot, pilots, query
bp = Blueprint('pilots', __name__)
@route(bp, '/<pilot_id>', methods=['GET'], marshal_with=pilot)
@route(bp, '/', methods=['GET'], _query=query, marshal_with=pilots, validate=False)
| 22.521739 | 83 | 0.720077 | # -*- coding: utf-8 -*-
from flask import Blueprint
from flask_journey import route
from .services import get_pilots, get_pilot
from .schemas import pilot, pilots, query
bp = Blueprint('pilots', __name__)
@route(bp, '/<pilot_id>', methods=['GET'], marshal_with=pilot)
def get_one(pilot_id):
    """Return a single pilot by id, marshalled with the `pilot` schema."""
    return get_pilot(pilot_id)
@route(bp, '/', methods=['GET'], _query=query, marshal_with=pilots, validate=False)
def get_many(_query):
    """Return the pilot collection, optionally filtered by the ``name``
    query-string parameter; marshalled with the `pilots` schema."""
    name_filter = _query.data.get('name')
    return get_pilots(name_filter)
| 113 | 0 | 44 |
8fccc2b51a05ade08d1e523b5d619126a0c84acf | 220 | py | Python | setup.py | IdanAtias/redis-on-k8s | a20acaf44f37adcd41a1fc5c360fba1bacd2528e | [
"MIT"
] | null | null | null | setup.py | IdanAtias/redis-on-k8s | a20acaf44f37adcd41a1fc5c360fba1bacd2528e | [
"MIT"
] | null | null | null | setup.py | IdanAtias/redis-on-k8s | a20acaf44f37adcd41a1fc5c360fba1bacd2528e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Minimal packaging config for the "redisfe" distribution: installing it adds
# a `redisfe` console command that dispatches to redisfe.main:main.
setup(
    name="redisfe",
    version="0.0.1",
    packages=find_packages(),
    entry_points={"console_scripts": ("redisfe=redisfe.main:main",)},
)
| 20 | 69 | 0.654545 | # -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Minimal packaging config for the "redisfe" distribution: installing it adds
# a `redisfe` console command that dispatches to redisfe.main:main.
setup(
    name="redisfe",
    version="0.0.1",
    packages=find_packages(),
    entry_points={"console_scripts": ("redisfe=redisfe.main:main",)},
)
| 0 | 0 | 0 |
84d4a220f29f0e0bd994f0f18dd49fb101ca5d4f | 2,752 | py | Python | code/introduction/matplotlib-timeline.py | geo7/scientific-visualization-book | 71f6bac4db7ee2f26e88052fe7faa800303d8b00 | [
"BSD-2-Clause"
] | 2 | 2021-11-17T15:10:09.000Z | 2021-12-24T13:31:10.000Z | code/introduction/matplotlib-timeline.py | WuShichao/scientific-visualization-book | 389766215aa6b234ed1cf560a3768437d41d1d37 | [
"BSD-2-Clause"
] | 1 | 2021-12-12T11:37:48.000Z | 2021-12-12T11:39:00.000Z | code/introduction/matplotlib-timeline.py | WuShichao/scientific-visualization-book | 389766215aa6b234ed1cf560a3768437d41d1d37 | [
"BSD-2-Clause"
] | 2 | 2021-12-30T12:20:07.000Z | 2022-02-24T06:36:41.000Z | # ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 2))
ax = fig.add_subplot(111, xlim=(2002.5, 2021.5), ylim=(0, 6.5), yticks=([]))
ax.tick_params("x", labelsize="x-small", which="major")
plt.plot([2002.5, 2021.5], [0, 0], color="black", linewidth=1.0, clip_on=False)
X = np.arange(2003, 2022)
Y = np.zeros(len(X))
plt.scatter(
X,
Y,
s=50,
linewidth=1.0,
zorder=10,
clip_on=False,
edgecolor="black",
facecolor="white",
)
annotate(ax, 2021, 4, "3.4")
annotate(ax, 2020, 3, "3.3")
annotate(ax, 2019, 4, "3.2")
annotate(ax, 2019, 2, "3.1")
annotate(ax, 2018, 3, "3.0", y0=1.5)
annotate(ax, 2018, 1, "2.2", fc="#777777")
annotate(ax, 2017, 4, "2.1", y0=2.5)
annotate(ax, 2017, 2, "2.0")
annotate(ax, 2015, 2, "1.5")
annotate(ax, 2014, 1, "1.4")
annotate(ax, 2013, 2, "1.3")
annotate(ax, 2012, 1, "1.2")
annotate(ax, 2011, 3, "1.1", y0=2.5)
annotate(ax, 2011, 2, "1.0")
annotate(ax, 2009, 1, "0.99")
annotate(ax, 2003, 1, "0.10")
x0, x1 = 2002.5, 2011.9
ax.plot([x0, x1], [5, 5], color="black", linewidth=1, marker="|", clip_on=False)
ax.text((x0 + x1) / 2, 5.1, "J.D. Hunter", ha="center", va="bottom", size="x-small")
x0, x1 = 2012.1, 2017.9
ax.plot([x0, x1], [5, 5], color="black", linewidth=1, marker="|", clip_on=False)
ax.text((x0 + x1) / 2, 5.1, "M. Droettboom", ha="center", va="bottom", size="x-small")
x0, x1 = 2014.1, 2021.5
ax.plot([x0, x1 + 1], [6, 6], color="black", linewidth=1, marker="|")
ax.text((x0 + x1) / 2, 6.1, "T. Caswell", ha="center", va="bottom", size="x-small")
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.set_xticks(np.arange(2003, 2022, 2))
plt.tight_layout()
plt.savefig("../../figures/introduction/matplotlib-timeline.pdf")
plt.savefig("../../figures/introduction/matplotlib-timeline.png", dpi=300)
plt.show()
| 31.632184 | 86 | 0.560683 | # ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def annotate(ax, x, y, text, fc="#ff7777", y0=0):
    """Draw a rounded version-label bubble anchored at (x, y - 0.5) on *ax*,
    plus a dotted vertical guide line from the anchor down to y0."""
    anchor_y = y - 0.5
    bubble = dict(boxstyle="round", fc=fc, ec="none")
    wedge = dict(
        arrowstyle="wedge,tail_width=1.", fc=fc, ec="none", patchA=None
    )
    ax.annotate(
        " " + text + " ",
        xy=(x, anchor_y),
        xycoords="data",
        xytext=(0, 12),
        textcoords="offset points",
        color="white",
        size="x-small",
        va="center",
        ha="center",
        weight="bold",
        bbox=bubble,
        arrowprops=wedge,
    )
    plt.plot([x, x], [anchor_y, y0], color="black", linestyle=":", linewidth=0.75)
# Build the Matplotlib release-timeline figure: a horizontal axis of years
# with one marker per release, labelled bubbles, and maintainer-tenure bars.
fig = plt.figure(figsize=(5, 2))
ax = fig.add_subplot(111, xlim=(2002.5, 2021.5), ylim=(0, 6.5), yticks=([]))
ax.tick_params("x", labelsize="x-small", which="major")
# baseline of the timeline
plt.plot([2002.5, 2021.5], [0, 0], color="black", linewidth=1.0, clip_on=False)
# one white circle per year on the baseline
X = np.arange(2003, 2022)
Y = np.zeros(len(X))
plt.scatter(
    X,
    Y,
    s=50,
    linewidth=1.0,
    zorder=10,
    clip_on=False,
    edgecolor="black",
    facecolor="white",
)
# release bubbles: annotate(ax, year, height, version, ...); heights are
# staggered manually to avoid overlaps
annotate(ax, 2021, 4, "3.4")
annotate(ax, 2020, 3, "3.3")
annotate(ax, 2019, 4, "3.2")
annotate(ax, 2019, 2, "3.1")
annotate(ax, 2018, 3, "3.0", y0=1.5)
annotate(ax, 2018, 1, "2.2", fc="#777777")
annotate(ax, 2017, 4, "2.1", y0=2.5)
annotate(ax, 2017, 2, "2.0")
annotate(ax, 2015, 2, "1.5")
annotate(ax, 2014, 1, "1.4")
annotate(ax, 2013, 2, "1.3")
annotate(ax, 2012, 1, "1.2")
annotate(ax, 2011, 3, "1.1", y0=2.5)
annotate(ax, 2011, 2, "1.0")
annotate(ax, 2009, 1, "0.99")
annotate(ax, 2003, 1, "0.10")
# maintainer tenure bars above the timeline
x0, x1 = 2002.5, 2011.9
ax.plot([x0, x1], [5, 5], color="black", linewidth=1, marker="|", clip_on=False)
ax.text((x0 + x1) / 2, 5.1, "J.D. Hunter", ha="center", va="bottom", size="x-small")
x0, x1 = 2012.1, 2017.9
ax.plot([x0, x1], [5, 5], color="black", linewidth=1, marker="|", clip_on=False)
ax.text((x0 + x1) / 2, 5.1, "M. Droettboom", ha="center", va="bottom", size="x-small")
x0, x1 = 2014.1, 2021.5
ax.plot([x0, x1 + 1], [6, 6], color="black", linewidth=1, marker="|")
ax.text((x0 + x1) / 2, 6.1, "T. Caswell", ha="center", va="bottom", size="x-small")
# hide the frame, keep only year ticks
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.set_xticks(np.arange(2003, 2022, 2))
plt.tight_layout()
plt.savefig("../../figures/introduction/matplotlib-timeline.pdf")
plt.savefig("../../figures/introduction/matplotlib-timeline.png", dpi=300)
plt.show()
| 552 | 0 | 23 |
50bcb74ff72e68b78833e7e63920662728bd17d0 | 4,798 | py | Python | src/_speedup.py | Kefan-pauline/HER-CPRAND | 131a284a486ecc34baa7d1d766836ab7dda12087 | [
"MIT"
] | null | null | null | src/_speedup.py | Kefan-pauline/HER-CPRAND | 131a284a486ecc34baa7d1d766836ab7dda12087 | [
"MIT"
] | null | null | null | src/_speedup.py | Kefan-pauline/HER-CPRAND | 131a284a486ecc34baa7d1d766836ab7dda12087 | [
"MIT"
] | null | null | null | import tensorly as tl
import numpy as np
from src._als import als,nn_als
from src._herals import her_Als,nn_her_Als
from src._cprand import CPRAND, nn_CPRAND
from src._hercprand import her_CPRAND,nn_her_CPRAND
from src._base import init_factors,random_init_fac
import copy
import matplotlib.pyplot as plt
def speedup(list_N,r,list_S,list_P,tol,noise_level=0.1,scale=True,nn=False,nb_tensors=5):
    """
    Plot the speed up of her CPRAND vs ALS, her ALS and CPRAND.

    Parameters
    ----------
    list_N : list
        list of dimensions (in the increasing order); tensors are cubic N x N x N
    r : int
        rank of the tensor
    list_S : list
        list of the sample sizes, same length as list_P
    list_P : list
        list of the err sample sizes, same length as list_S
    tol : double
        tolerance for the 4 algorithms
    noise_level : float, optional
        noise_level of the tensor. The default is 0.1.
    scale : boolean, optional
        whether to scale the condition number of factors or not. The default is True.
    nn : boolean, optional
        use nn methods or not. The default is False.
    nb_tensors : int, optional
        number of random tensors whose run times are accumulated per
        dimension. The default is 5.

    Returns
    -------
    None.
    """
    # speed-up ratios: one row per dimension N, one column per (S, P) pair
    vsals = np.zeros((len(list_N), len(list_S)))
    vsherals = np.zeros((len(list_N), len(list_S)))
    vscprand = np.zeros((len(list_N), len(list_S)))
    for i in range(len(list_N)):
        # run times accumulated over the nb_tensors random problems
        time_als = 0
        time_herals = 0
        time_hercprand = np.zeros(len(list_S))
        time_cprand = np.zeros(len(list_S))
        for k in range(nb_tensors):
            fac_true, noise = init_factors(list_N[i], list_N[i], list_N[i], r, noise_level=noise_level, scale=scale, nn=nn)
            t = tl.cp_to_tensor((None, fac_true)) + noise
            # NOTE(review): the random starting point is drawn only for the
            # first tensor (k == 0) and reused for the others — confirm intended
            if k == 0:
                factors = random_init_fac(t, r)
            if nn == False:
                weights2, factors2, it2, error2, time2 = als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
                weights1, factors1, it1, error1, cpt1, time1 = her_Als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
            else:
                weights2, factors2, it2, error2, time2 = nn_als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
                weights1, factors1, it1, error1, cpt1, time1 = nn_her_Als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
            # total wall time until convergence for this run
            time_als += np.cumsum(time2)[it2 - 1]
            time_herals += np.cumsum(time1)[it1 - 1]
            for s in range(len(list_S)):
                if nn == False:
                    weights3, factors3, it3, error3, time3 = CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                    weights4, factors4, it4, error4, cpt4, time4 = her_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                else:
                    weights3, factors3, it3, error3, time3 = nn_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                    weights4, factors4, it4, error4, cpt4, time4 = nn_her_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                time_hercprand[s] += np.cumsum(time4)[it4 - 1]
                # BUG FIX: was `=+` (plain assignment of the positive value),
                # which kept only the last tensor's CPRAND time instead of
                # accumulating like every other timer above
                time_cprand[s] += np.cumsum(time3)[it3 - 1]
        vsals[i, :] = time_als / copy.deepcopy(time_hercprand)
        vsherals[i, :] = time_herals / copy.deepcopy(time_hercprand)
        vscprand[i, :] = copy.deepcopy(time_cprand) / copy.deepcopy(time_hercprand)
    # plot: one curve per (S, P) configuration for each baseline
    plt.figure(0)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vsals[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs als')
    plt.figure(1)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vsherals[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs herals')
    plt.figure(2)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vscprand[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs cprand')
| 42.460177 | 198 | 0.626928 | import tensorly as tl
import numpy as np
from src._als import als,nn_als
from src._herals import her_Als,nn_her_Als
from src._cprand import CPRAND, nn_CPRAND
from src._hercprand import her_CPRAND,nn_her_CPRAND
from src._base import init_factors,random_init_fac
import copy
import matplotlib.pyplot as plt
def speedup(list_N,r,list_S,list_P,tol,noise_level=0.1,scale=True,nn=False,nb_tensors=5):
    """
    Plot the speed up of her CPRAND vs ALS, her ALS and CPRAND.

    Parameters
    ----------
    list_N : list
        list of dimensions (in the increasing order); tensors are cubic N x N x N
    r : int
        rank of the tensor
    list_S : list
        list of the sample sizes, same length as list_P
    list_P : list
        list of the err sample sizes, same length as list_S
    tol : double
        tolerance for the 4 algorithms
    noise_level : float, optional
        noise_level of the tensor. The default is 0.1.
    scale : boolean, optional
        whether to scale the condition number of factors or not. The default is True.
    nn : boolean, optional
        use nn methods or not. The default is False.
    nb_tensors : int, optional
        number of random tensors whose run times are accumulated per
        dimension. The default is 5.

    Returns
    -------
    None.
    """
    # speed-up ratios: one row per dimension N, one column per (S, P) pair
    vsals = np.zeros((len(list_N), len(list_S)))
    vsherals = np.zeros((len(list_N), len(list_S)))
    vscprand = np.zeros((len(list_N), len(list_S)))
    for i in range(len(list_N)):
        # run times accumulated over the nb_tensors random problems
        time_als = 0
        time_herals = 0
        time_hercprand = np.zeros(len(list_S))
        time_cprand = np.zeros(len(list_S))
        for k in range(nb_tensors):
            fac_true, noise = init_factors(list_N[i], list_N[i], list_N[i], r, noise_level=noise_level, scale=scale, nn=nn)
            t = tl.cp_to_tensor((None, fac_true)) + noise
            # NOTE(review): the random starting point is drawn only for the
            # first tensor (k == 0) and reused for the others — confirm intended
            if k == 0:
                factors = random_init_fac(t, r)
            if nn == False:
                weights2, factors2, it2, error2, time2 = als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
                weights1, factors1, it1, error1, cpt1, time1 = her_Als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
            else:
                weights2, factors2, it2, error2, time2 = nn_als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
                weights1, factors1, it1, error1, cpt1, time1 = nn_her_Als(t, r, factors=copy.deepcopy(factors), it_max=10000, tol=tol, time_rec=True)
            # total wall time until convergence for this run
            time_als += np.cumsum(time2)[it2 - 1]
            time_herals += np.cumsum(time1)[it1 - 1]
            for s in range(len(list_S)):
                if nn == False:
                    weights3, factors3, it3, error3, time3 = CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                    weights4, factors4, it4, error4, cpt4, time4 = her_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                else:
                    weights3, factors3, it3, error3, time3 = nn_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                    weights4, factors4, it4, error4, cpt4, time4 = nn_her_CPRAND(t, r, list_S[s], list_P[s], factors=copy.deepcopy(factors), exact_err=False, it_max=10000, err_it_max=10000, tol=tol, time_rec=True)
                time_hercprand[s] += np.cumsum(time4)[it4 - 1]
                # BUG FIX: was `=+` (plain assignment of the positive value),
                # which kept only the last tensor's CPRAND time instead of
                # accumulating like every other timer above
                time_cprand[s] += np.cumsum(time3)[it3 - 1]
        vsals[i, :] = time_als / copy.deepcopy(time_hercprand)
        vsherals[i, :] = time_herals / copy.deepcopy(time_hercprand)
        vscprand[i, :] = copy.deepcopy(time_cprand) / copy.deepcopy(time_hercprand)
    # plot: one curve per (S, P) configuration for each baseline
    plt.figure(0)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vsals[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs als')
    plt.figure(1)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vsherals[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs herals')
    plt.figure(2)
    for s in range(len(list_S)):
        legend = "S = " + str(list_S[s]) + " , P = " + str(list_P[s])
        plt.plot(list_N, vscprand[:, s], label=legend)
    plt.axhline(y=1, color='k', linestyle='--', label="speed up = 1")
    plt.xlabel('N')
    plt.ylabel('Speed up factor')
    plt.legend(loc='best')
    plt.title('Speed up vs cprand')
| 0 | 0 | 0 |
07062acf91bc80ff83e15bd2102da27551a695f9 | 586 | py | Python | main.py | pizen/liturgical-today | bb141173bd37c2f2409dc74ce222dc62bfad844f | [
"MIT"
] | null | null | null | main.py | pizen/liturgical-today | bb141173bd37c2f2409dc74ce222dc62bfad844f | [
"MIT"
] | null | null | null | main.py | pizen/liturgical-today | bb141173bd37c2f2409dc74ce222dc62bfad844f | [
"MIT"
] | null | null | null | from datetime import date
from flask import abort, Flask, Response
import json
from pyliturgical import calendar
app = Flask(__name__)
@app.route('/reformed/<date_str>')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| 20.928571 | 52 | 0.667235 | from datetime import date
from flask import abort, Flask, Response
import json
from pyliturgical import calendar
app = Flask(__name__)
@app.route('/reformed/<date_str>')
def reformed(date_str):
    """Return the Reformed liturgical-calendar entry for an ISO date
    (YYYY-MM-DD) as JSON; responds 400 on a malformed date."""
    try:
        d = date.fromisoformat(date_str)
    except ValueError:
        # narrowed from `except Exception`: fromisoformat raises ValueError
        # for malformed input; anything else should surface as a 500
        abort(400)
    resp = Response(
        json.dumps(calendar.lookup(d)),
        status=200,
        mimetype='application/json'
    )
    # the entry for a fixed date never changes, so let clients cache for a day
    resp.cache_control.public = True
    resp.cache_control.max_age = 86400
    return resp
if __name__ == '__main__':
    # local development server only; use a proper WSGI server in production
    app.run(host='127.0.0.1', port=8080, debug=True)
| 309 | 0 | 22 |
46974bc9e27f73be1af4d1ab4cde572896bb9a44 | 9,212 | py | Python | code_captioning/class_ende.py | 201528014227051/ARNet | e7779d6af1a8990712d8e8e4a72e4c1ed138f60e | [
"MIT"
] | 9 | 2018-07-11T11:34:09.000Z | 2021-11-21T15:37:18.000Z | code_captioning/class_ende.py | 201528014227051/ARNet | e7779d6af1a8990712d8e8e4a72e4c1ed138f60e | [
"MIT"
] | null | null | null | code_captioning/class_ende.py | 201528014227051/ARNet | e7779d6af1a8990712d8e8e4a72e4c1ed138f60e | [
"MIT"
] | 2 | 2018-10-19T03:57:51.000Z | 2018-12-01T17:13:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ipdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from classLSTMCore import LSTMCore
| 41.309417 | 119 | 0.606057 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import ipdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from classLSTMCore import LSTMCore
class EncodeDecode(nn.Module):
    """
    LSTM encoder-decoder for code commenting: an encoder LSTM reads the
    embedded token sequence of a code snippet and its final state initializes
    a decoder LSTM that emits a comment word by word.  Training supports
    scheduled sampling (with probability `ss_prob`, the decoder is fed its own
    sample instead of the ground-truth token).
    """
    def __init__(self, opt):
        # opt carries the hyper-parameters: vocabulary sizes, LSTM size,
        # dropout, embedding size, truncation lengths and sampling probability
        super(EncodeDecode, self).__init__()
        self.token_cnt = opt.token_cnt
        self.word_cnt = opt.word_cnt
        self.lstm_size = opt.lstm_size
        self.drop_prob = opt.drop_prob
        self.input_encoding_size = opt.input_encoding_size
        self.encode_time_step = opt.code_truncate
        self.decode_time_step = opt.comment_truncate
        self.ss_prob = opt.ss_prob
        # separate LSTM cells for the code encoder and the comment decoder
        self.encode_lstm = LSTMCore(self.input_encoding_size, self.lstm_size, self.drop_prob)
        self.decode_lstm = LSTMCore(self.input_encoding_size, self.lstm_size, self.drop_prob)
        # shared token embedding (+1 row, presumably for padding/start — TODO confirm)
        self.embed = nn.Embedding(self.token_cnt + 1, self.input_encoding_size)
        # projection from decoder hidden state to the comment vocabulary
        self.logit = nn.Linear(self.lstm_size, self.word_cnt)
        self.init_weights()
    def init_weights(self):
        """Uniformly initialize embedding/output weights in [-0.1, 0.1]; zero the output bias."""
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.logit.weight.data.uniform_(-0.1, 0.1)
        self.logit.bias.data.fill_(0)
    def copy_weights(self, model_path):
        """Copy every parameter of this model from the checkpoint at `model_path` (logs each key)."""
        src_weights = torch.load(model_path)
        own_dict = self.state_dict()
        for key, var in own_dict.items():
            print("copy weights: {} size: {}".format(key, var.size()))
            own_dict[key].copy_(src_weights[key])
    def init_hidden(self, batch_size):
        """Return a zeroed (h, c) LSTM state of shape (1, batch_size, lstm_size) on the model's device."""
        weight = next(self.parameters()).data
        init_h = Variable(weight.new(1, batch_size, self.lstm_size).zero_())
        init_c = Variable(weight.new(1, batch_size, self.lstm_size).zero_())
        init_state = (init_h, init_c)
        return init_state
    def forward(self, code_matrix, comment_matrix, current_comment_mask_cuda):
        """
        Teacher-forced pass with scheduled sampling.

        Encodes `code_matrix` (batch x time token ids), then decodes against
        `comment_matrix`, returning the per-step log-softmax distributions
        stacked on dim 1 (batch x steps x word_cnt).
        """
        batch_size = code_matrix.size(0)
        encode_state = self.init_hidden(batch_size)
        decode_logit_seq = []
        outputs = []
        # encode
        for i in range(self.encode_time_step):
            encode_words = code_matrix[:, i].clone()
            # an all-zero column ends the sequence (assumes id 0 is padding — TODO confirm)
            if code_matrix[:, i].data.sum() == 0:
                break
            encode_xt = self.embed(encode_words)
            encode_output, encode_state = self.encode_lstm.forward(encode_xt, encode_state)
        # decode, seeded with the encoder's final state
        decode_state = (encode_state[0].clone(), encode_state[1].clone())
        for i in range(self.decode_time_step):
            if i >= 1 and self.ss_prob > 0.0:
                # scheduled sampling: per example, with prob ss_prob feed a
                # sample from the previous step's distribution instead of GT
                sample_prob = current_comment_mask_cuda.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = comment_matrix[:, i].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = comment_matrix[:, i].data.clone()
                    prob_prev = torch.exp(outputs[-1].data)  # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = comment_matrix[:, i].clone()
            if i >= 1 and comment_matrix[:, i].data.sum() == 0:
                break
            decode_xt = self.embed(it)
            decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
            decode_logit_words = F.log_softmax(self.logit(decode_output))
            decode_logit_seq.append(decode_logit_words)
            outputs.append(decode_logit_words)
        decode_logit_seq = torch.cat([_.unsqueeze(1) for _ in decode_logit_seq], 1).contiguous()
        return decode_logit_seq
    def sample(self, code_matrix, init_index, eos_index):
        """
        Greedy decoding: start from `init_index`, pick argmax each step, stop
        at `eos_index`.  Returns (token ids, their log-probs, all per-step
        log-distributions), each stacked on dim 1.
        """
        batch_size = code_matrix.size(0)
        encode_state = self.init_hidden(batch_size)
        seq = []
        seqLogprobs = []
        logprobs_all = []
        # encode
        for i in range(self.encode_time_step):
            encode_words = code_matrix[:, i].clone()
            if code_matrix[:, i].data.sum() == 0:
                break
            encode_xt = self.embed(encode_words)
            encode_output, encode_state = self.encode_lstm.forward(encode_xt, encode_state)
        # decode
        decode_state = (encode_state[0].clone(), encode_state[1].clone())
        for i in range(self.decode_time_step):
            if i == 0:
                # first step: feed the start token to every example
                it = code_matrix.data.new(batch_size).long().fill_(init_index)
                decode_xt = self.embed(Variable(it, requires_grad=False).cuda())
                decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
            else:
                # subsequent steps: feed the previous argmax token
                max_logprobs, it = torch.max(logprobs.data, 1)
                it = it.view(-1).long()
                # NOTE(review): summing over the batch only matches eos_index
                # cleanly when batch_size == 1 — confirm intended usage
                if it.sum() == eos_index:
                    break
                decode_xt = self.embed(Variable(it, requires_grad=False).cuda())
                decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
                seq.append(it)
                seqLogprobs.append(max_logprobs.view(-1))
            logprobs = F.log_softmax(self.logit(decode_output))
            logprobs_all.append(logprobs)
        greedy_seq = torch.cat([_.unsqueeze(1) for _ in seq], 1).contiguous()
        greedy_seq_probs = torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1).contiguous()
        greedy_logprobs_all = torch.cat([_.unsqueeze(1) for _ in logprobs_all], 1).contiguous()
        return greedy_seq, greedy_seq_probs, greedy_logprobs_all
    def teacher_forcing_get_hidden_states(self, code_matrix, comment_matrix, comment_mask, eos_index):
        """Run a teacher-forced (with scheduled sampling) pass and return the
        decoder's final hidden state h (used as a sequence representation)."""
        batch_size = code_matrix.size(0)
        encode_state = self.init_hidden(batch_size)
        outputs = []
        # encoder section
        encode_hidden_states = []
        for i in range(self.encode_time_step):
            encode_words = code_matrix[:, i].clone()
            if code_matrix[:, i].data.sum() == 0:
                break
            encode_xt = self.embed(encode_words)
            encode_output, encode_state = self.encode_lstm.forward(encode_xt, encode_state)
            encode_hidden_states.append(encode_output)
        # decoder section
        decode_state = (encode_state[0].clone(), encode_state[1].clone())
        for i in range(self.decode_time_step):
            if i >= 1 and self.ss_prob > 0.0:
                sample_prob = comment_mask.data.new(batch_size).uniform_(0, 1)
                sample_mask = sample_prob < self.ss_prob
                if sample_mask.sum() == 0:
                    it = comment_matrix[:, i].clone()
                else:
                    sample_ind = sample_mask.nonzero().view(-1)
                    it = comment_matrix[:, i].data.clone()
                    prob_prev = torch.exp(outputs[-1].data)  # fetch prev distribution: shape Nx(M+1)
                    it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                    it = Variable(it, requires_grad=False)
            else:
                it = comment_matrix[:, i].clone()
            # NOTE(review): indexing element [0] assumes batch_size == 1 — confirm
            if it.cpu().data[0] == eos_index:
                break
            decode_xt = self.embed(it)
            decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
        return decode_state[0]
    def free_running_get_hidden_states(self, code_matrix, init_index, eos_index):
        """Run a greedy free-running pass (model feeds itself) and return the
        decoder's final hidden state h."""
        batch_size = code_matrix.size(0)
        encode_state = self.init_hidden(batch_size)
        seq = []
        seqLogprobs = []
        logprobs_all = []
        # encoder section
        encode_hidden_states = []
        for i in range(self.encode_time_step):
            encode_words = code_matrix[:, i].clone()
            if code_matrix[:, i].data.sum() == 0:
                break
            encode_xt = self.embed(encode_words)
            encode_output, encode_state = self.encode_lstm.forward(encode_xt, encode_state)
            encode_hidden_states.append(encode_output)
        # decoder section
        decode_state = (encode_state[0].clone(), encode_state[1].clone())
        for i in range(self.decode_time_step):
            if i == 0:
                it = code_matrix.data.new(batch_size).long().fill_(init_index)
                decode_xt = self.embed(Variable(it, requires_grad=False).cuda())
                decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
            else:
                max_logprobs, it = torch.max(logprobs.data, 1)
                it = it.view(-1).long()
                # NOTE(review): indexing element [0] assumes batch_size == 1 — confirm
                if it.cpu()[0] == eos_index:
                    break
                decode_xt = self.embed(Variable(it, requires_grad=False).cuda())
                decode_output, decode_state = self.decode_lstm.forward(decode_xt, decode_state)
                seq.append(it)
                seqLogprobs.append(max_logprobs.view(-1))
            logprobs = F.log_softmax(self.logit(decode_output))
            logprobs_all.append(logprobs)
        return decode_state[0]
| 8,697 | 9 | 238 |
5d2fae479dc054bce8a8026cf03aca7d52d024b4 | 996 | py | Python | tests/test_graph_io.py | rhysnewell/spacegraphcats | e4d8b29171af0d1c8507066021be3b6a50c7802b | [
"BSD-3-Clause"
] | 96 | 2016-05-13T12:13:07.000Z | 2021-12-17T21:01:17.000Z | tests/test_graph_io.py | rhysnewell/spacegraphcats | e4d8b29171af0d1c8507066021be3b6a50c7802b | [
"BSD-3-Clause"
] | 421 | 2016-05-17T20:47:16.000Z | 2022-03-08T00:35:32.000Z | tests/test_graph_io.py | rhysnewell/spacegraphcats | e4d8b29171af0d1c8507066021be3b6a50c7802b | [
"BSD-3-Clause"
] | 17 | 2016-10-13T17:13:17.000Z | 2021-06-02T18:19:34.000Z | import unittest
from io import StringIO
from spacegraphcats.catlas.graph_io import read_from_gxt, write_to_gxt
from spacegraphcats.catlas.graph import Graph
if __name__ == "__main__":
unittest.main()
| 24.9 | 70 | 0.63755 | import unittest
from io import StringIO
from spacegraphcats.catlas.graph_io import read_from_gxt, write_to_gxt
from spacegraphcats.catlas.graph import Graph
class IOTest(unittest.TestCase):
    """Round-trip tests: a graph written with write_to_gxt parses back identically."""

    def _assert_roundtrip(self, radius=None):
        """Write a 5-node star graph to a buffer, re-read it, and compare
        arcs and node count with the original."""
        star = Graph(5)
        star.add_arc(1, 0).add_arc(2, 0).add_arc(3, 0).add_arc(4, 0)
        buf = StringIO()
        if radius is None:
            write_to_gxt(buf, star)
        else:
            write_to_gxt(buf, star, radius)
        buf.seek(0)
        reread = read_from_gxt(buf, 5, True)
        self.assertEqual(list(reread.arcs()), list(star.arcs()))
        self.assertEqual(len(reread), len(star))

    def test_writing_and_reading(self):
        self._assert_roundtrip(radius=1)

    def test_writing_and_reading_no_weight(self):
        self._assert_roundtrip()
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| 701 | 11 | 76 |
e8bd989197609c3dd25e513b44bbf56175e59919 | 16,715 | py | Python | models/bidi_rnn_iou_predictor_model.py | maksay/seq-train | 1af93c6e8e5db93a88c872a66546f6f4bd921551 | [
"MIT"
] | 11 | 2019-07-08T07:40:56.000Z | 2020-10-12T08:27:21.000Z | models/bidi_rnn_iou_predictor_model.py | maksay/seq-train | 1af93c6e8e5db93a88c872a66546f6f4bd921551 | [
"MIT"
] | 1 | 2019-07-09T02:23:08.000Z | 2019-07-09T02:23:08.000Z | models/bidi_rnn_iou_predictor_model.py | maksay/seq-train | 1af93c6e8e5db93a88c872a66546f6f4bd921551 | [
"MIT"
] | 3 | 2019-07-08T08:20:38.000Z | 2021-02-03T15:16:39.000Z | from models.base_model import BaseModel
import tensorflow as tf
import numpy as np
from label_storage import LabelStorage
from tqdm import tqdm
import time
from copy import deepcopy
# Three heads acting on the rnn output of size batchxlengthxoutput_size
# They predict IoU, whether the Gt exists, and the shift to GT bounding box
# IoU between two bounding boxes computation in TF
# such that IoU with GT could be optimized.
| 45.544959 | 80 | 0.494167 | from models.base_model import BaseModel
import tensorflow as tf
import numpy as np
from label_storage import LabelStorage
from tqdm import tqdm
import time
from copy import deepcopy
def sequence_embedding(input_seq,
                       feat_dim,
                       embedding_size,
                       rnn_cell_size,
                       dropout,
                       training,
                       layers):
    """
    Embed a batch of feature sequences and run 1 or 2 bidirectional LSTMs
    (TF1 graph code; variable_scope/name strings are checkpoint-sensitive).

    input_seq: float tensor (batch, time, feat_dim).  Each time step is
    batch-normalized, projected to `embedding_size` (ReLU + batch norm +
    dropout), then fed to a bidirectional LSTM of size `rnn_cell_size`;
    when layers == 2 a second bidirectional LSTM is stacked on top.

    Returns a tensor of shape (batch, time, 2, rnn_cell_size) stacking the
    forward and backward outputs on axis 2.
    """
    batch_size = tf.shape(input_seq)[0]
    trainable = True  # batch-norm / dense variables are always trainable here
    with tf.variable_scope('batch_norm'):
        # flatten (batch, time, feat) -> (batch*time, feat) so batch norm
        # operates per feature
        input_seq_2d = tf.reshape(input_seq,
                                  shape=(-1, feat_dim),
                                  name='input_2d')
        batch_norm = tf.layers.batch_normalization(input_seq_2d,
                                                   trainable=trainable,
                                                   training=training,
                                                   name='batch_norm',
                                                   axis=1)
    with tf.variable_scope('embedding'):
        embedding = tf.layers.dense(batch_norm,
                                    trainable=trainable,
                                    units=embedding_size,
                                    activation=tf.nn.relu,
                                    name='dense')
        embedding = tf.layers.batch_normalization(embedding,
                                                  trainable=trainable,
                                                  training=training,
                                                  name='batch_norm',
                                                  axis=1)
        embedding = tf.layers.dropout(embedding,
                                      rate=dropout,
                                      training=training,
                                      name='dropout')
        # restore the (batch, time, embedding) layout for the RNN
        embedding = tf.reshape(embedding,
                               (batch_size, -1, embedding_size),
                               name='reshape')
    with tf.variable_scope('rnn'):
        cell_fw = tf.contrib.rnn.BasicLSTMCell(rnn_cell_size)
        cell_bw = tf.contrib.rnn.BasicLSTMCell(rnn_cell_size)
        (output_fw, output_bw), _ = \
            tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                            embedding,
                                            dtype=tf.float32)
        # stack fw/bw outputs on a new axis: (batch, time, 2, cell)
        rnn_output_3d = tf.stack([output_fw, output_bw], axis=2)
    if layers == 2:
        with tf.variable_scope('rnn2'):
            cell_fw2 = tf.contrib.rnn.BasicLSTMCell(rnn_cell_size)
            cell_bw2 = tf.contrib.rnn.BasicLSTMCell(rnn_cell_size)
            # the first layer's fw/bw pair is concatenated back to
            # (batch, time, 2*cell) before entering the second layer
            (output_fw2, output_bw2), _ = \
                tf.nn.bidirectional_dynamic_rnn(cell_fw2, cell_bw2,
                                                tf.reshape(rnn_output_3d,
                                                           (batch_size,
                                                            -1,
                                                            2 * rnn_cell_size)),
                                                dtype=tf.float32)
            rnn_output_3d = tf.stack([output_fw2, output_bw2], axis=2)
    return rnn_output_3d
# Three heads acting on the rnn output of size batchxlengthxoutput_size
# They predict IoU, whether the Gt exists, and the shift to GT bounding box
def iou_prediction_head(rnn_output_3d, output_size):
    """Linear head predicting one raw IoU scalar per time step; returns a
    (batch, time) tensor."""
    n_batch = tf.shape(rnn_output_3d)[0]
    flat_features = tf.reshape(rnn_output_3d, (-1, output_size))
    per_step_iou = tf.layers.dense(flat_features, units=1, name='dense')[:, 0]
    return tf.reshape(per_step_iou, (n_batch, -1))
def label_prediction_head(rnn_output_3d, output_size):
    """Sigmoid head predicting one probability per time step; returns a
    (batch, time) tensor."""
    n_batch = tf.shape(rnn_output_3d)[0]
    flat_features = tf.reshape(rnn_output_3d, (-1, output_size))
    per_step_label = tf.layers.dense(flat_features,
                                     activation=tf.nn.sigmoid,
                                     units=1,
                                     name='dense')[:, 0]
    return tf.reshape(per_step_label, (n_batch, -1))
def bbox_shift_head(rnn_output_3d, output_size):
    """Linear head predicting a 4-value bounding-box shift per time step;
    returns a (batch, time, 4) tensor."""
    n_batch = tf.shape(rnn_output_3d)[0]
    flat_features = tf.reshape(rnn_output_3d, (-1, output_size))
    per_step_shift = tf.layers.dense(flat_features,
                                     units=4, name='dense')
    return tf.reshape(per_step_shift, (n_batch, -1, 4))
# Computation of the IoU between two bounding boxes with TF ops,
# so that the IoU with the ground truth can be optimized directly.
def bbox_iou(output_bboxes, label_bboxes):
    """Elementwise IoU between two (N, 4) sets of (x, y, w, h) boxes.

    Widths/heights are clamped at zero before computing areas, and a
    small epsilon keeps the division safe when both boxes are empty.
    """
    # Intersection rectangle edges.
    left = tf.maximum(output_bboxes[:, 0], label_bboxes[:, 0])
    right = tf.minimum(output_bboxes[:, 0] + output_bboxes[:, 2],
                       label_bboxes[:, 0] + label_bboxes[:, 2])
    top = tf.maximum(output_bboxes[:, 1], label_bboxes[:, 1])
    bottom = tf.minimum(output_bboxes[:, 1] + output_bboxes[:, 3],
                        label_bboxes[:, 1] + label_bboxes[:, 3])
    # Clamp to zero so disjoint boxes contribute no intersection.
    overlap_w = tf.maximum(0., right - left)
    overlap_h = tf.maximum(0., bottom - top)
    intersection = overlap_w * overlap_h
    area_sum = tf.add(tf.maximum(0., output_bboxes[:, 2]) *
                      tf.maximum(0., output_bboxes[:, 3]),
                      tf.maximum(0., label_bboxes[:, 2]) *
                      tf.maximum(0., label_bboxes[:, 3]))
    # IoU = inter / union, with union = area_sum - inter.
    return intersection / (area_sum - intersection + 1e-6)
class BidiRNNIoUPredictorModel(BaseModel):
    """Bidirectional-RNN model that scores tracking hypotheses.

    Depending on configuration flags the graph predicts, per timestep:

    * ``outputs["ious"]``   -- IoU of each input box with ground truth
      (``predict_ious``),
    * ``outputs["labels"]`` -- probability that a ground-truth box exists
      (``predict_labels``),
    * ``outputs["bboxes"]`` -- input boxes plus a predicted shift toward
      the ground truth (``predict_bboxes``).

    All heads share the embedding produced by ``sequence_embedding``.
    Configuration attributes (``embedding_size``, ``rnn_cell_size``,
    ``dropout``, ``layers``, ``lr``, ``batch_size`` …) are presumably
    populated by ``BaseModel.__init__`` from ``config`` — confirm there.
    """

    def __init__(self, config, mode, sess, ckpt_dir):
        """Build the full TF graph (placeholders, heads, losses, optimizer)."""
        super(BidiRNNIoUPredictorModel, self).__init__(config, mode,
                                                       sess, ckpt_dir)
        self.mode = mode
        self.training = True if mode == 'train' else False
        self.feat_dim = LabelStorage.instance.feature_dim()
        with tf.variable_scope('inputs'):
            self.input_seq = tf.placeholder(dtype=tf.float32,
                                            shape=[None, None, self.feat_dim],
                                            name='input_seq')
            self.input_bboxes = tf.placeholder(dtype=tf.float32,
                                               name='input_bboxes',
                                               shape=[None, None, 4])
            # BUG FIX: this placeholder was also named 'input_bboxes',
            # which made TensorFlow silently rename it to
            # 'input_bboxes_1' in the graph.  Name it after itself.
            self.input_values = tf.placeholder(dtype=tf.float32,
                                               name='input_values',
                                               shape=[None, None])
        with tf.variable_scope("labels"):
            self.label_bboxes = tf.placeholder(dtype=tf.float32,
                                               shape=[None, None, 4],
                                               name='label_bboxes')
            self.label_values = tf.placeholder(dtype=tf.float32,
                                               shape=[None, None],
                                               name='label_values')
        with tf.variable_scope("sequence_embedding"):
            self.rnn_output_3d = sequence_embedding(self.input_seq,
                                                    self.feat_dim,
                                                    self.embedding_size,
                                                    self.rnn_cell_size,
                                                    self.dropout,
                                                    self.training,
                                                    self.layers)
        self.outputs = {}
        self.losses = {}
        self.summaries = {}
        # --- IoU regression head -----------------------------------------
        if self.predict_ious == 1:
            with tf.variable_scope('predict_ious'):
                with tf.variable_scope('prediction'):
                    self.outputs["ious"] = \
                        iou_prediction_head(self.rnn_output_3d,
                                            2 * self.rnn_cell_size)
                    label_ious_3d = bbox_iou(
                        tf.reshape(self.input_bboxes, (-1, 4)),
                        tf.reshape(self.label_bboxes, (-1, 4)))
                    label_ious = \
                        tf.reshape(label_ious_3d,
                                   (tf.shape(self.label_bboxes)[0], -1))
                with tf.variable_scope("loss"):
                    # Squared error, masked by input_values so padded /
                    # missing timesteps do not contribute.
                    error_matrix = tf.square(self.outputs["ious"] - label_ious)
                    self.losses["iou_vector"] = \
                        tf.reduce_sum(error_matrix * self.input_values,
                                      axis=1) / \
                        tf.reduce_sum(self.input_values, axis=1)
                    self.losses["iou"] = \
                        tf.reduce_sum(error_matrix * self.input_values) /\
                        tf.reduce_sum(self.input_values)
                    self.summaries["IoU_loss"] = self.losses["iou"]
        # --- existence-label head ----------------------------------------
        if self.predict_labels == 1:
            with tf.variable_scope('predict_labels'):
                with tf.variable_scope('prediction'):
                    self.outputs["labels"] = \
                        label_prediction_head(self.rnn_output_3d,
                                              2 * self.rnn_cell_size)
                with tf.variable_scope("loss"):
                    error_matrix = tf.square(self.outputs["labels"] -
                                             self.label_values)
                    self.losses["label_vector"] = tf.reduce_mean(error_matrix,
                                                                 axis=1)
                    self.losses["label"] = tf.reduce_mean(error_matrix)
                    self.summaries["label_loss"] = self.losses["label"]
        # --- bounding-box refinement head --------------------------------
        with tf.variable_scope('predict_bboxes'):
            # Default: pass the input boxes through unchanged.
            self.outputs["bboxes"] = self.input_bboxes
            if self.predict_bboxes == 1:
                with tf.variable_scope("prediction"):
                    self.outputs["bboxes"] += \
                        bbox_shift_head(self.rnn_output_3d,
                                        2 * self.rnn_cell_size)
                with tf.variable_scope("loss"):
                    # Maximize IoU with ground truth via squared (1 - IoU).
                    self.label_ious_3d = bbox_iou(
                        tf.reshape(self.outputs["bboxes"], (-1, 4)),
                        tf.reshape(self.label_bboxes, (-1, 4)))
                    self.label_ious = \
                        tf.reshape(self.label_ious_3d,
                                   (tf.shape(self.label_bboxes)[0], -1))
                    error_matrix = tf.square(1 - self.label_ious)
                    self.losses["bbox"] = \
                        tf.reduce_sum(error_matrix * self.input_values) /\
                        tf.reduce_sum(self.input_values)
                    self.losses["bbox_vector"] = \
                        tf.reduce_sum(error_matrix * self.input_values, axis=1) / \
                        tf.reduce_sum(self.input_values, axis=1)
                    self.summaries["bbox_loss"] = self.losses["bbox"]
        with tf.variable_scope("optimizer"):
            # Scalar losses (optimized) vs per-hypothesis "*_vector" losses
            # (returned for analysis only).
            self.loss = 0
            self.loss_vector = 0
            for k, v in self.losses.items():
                if not k.endswith("vector"):
                    self.loss += v
                else:
                    self.loss_vector += v
            self.summaries["loss"] = self.loss
            self.optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_op = self.optimizer.minimize(self.loss,
                                                    global_step=
                                                    self.global_step)

    def train_epoch(self, labeled_hypotheses, epoch, do_train=True,
                    do_save=True):
        """Run one epoch over ``labeled_hypotheses``.

        :param labeled_hypotheses: hypotheses carrying ``features``,
            ``input_bboxes``, ``input_values``, ``labels`` and ``bboxes``.
        :param epoch: epoch index; seeds the shuffle for reproducibility.
        :param do_train: when False, only evaluates (no optimizer steps).
        :param do_save: when True, checkpoints periodically and at the end.
        :return: (dict of averaged summaries, per-hypothesis loss vector).
        """
        # Compute all features
        LabelStorage.instance.get_hypo_features(labeled_hypotheses)
        order = np.arange(len(labeled_hypotheses))
        np.random.seed(epoch + 1)
        np.random.shuffle(order)
        summaries = {}
        for k in self.summaries.keys():
            summaries[k] = []
        losses = []
        t_start = time.time()
        batch_range = range(0, len(labeled_hypotheses), self.batch_size)
        for batch_start in tqdm(batch_range) if do_train else batch_range:
            batch_end = min(batch_start + self.batch_size,
                            len(labeled_hypotheses))
            # Arrange data in batches
            cur_input_seq = np.stack([
                labeled_hypotheses[idx].features
                for idx in order[batch_start:batch_end]])
            cur_input_bboxes = np.stack([
                labeled_hypotheses[idx].input_bboxes
                for idx in order[batch_start:batch_end]])
            cur_input_values = np.stack([
                labeled_hypotheses[idx].input_values
                for idx in order[batch_start:batch_end]])
            cur_label_values = np.stack([
                labeled_hypotheses[idx].labels
                for idx in order[batch_start:batch_end]])
            cur_label_bboxes = np.stack([
                labeled_hypotheses[idx].bboxes
                for idx in order[batch_start:batch_end]])
            feed_dict = {
                self.input_seq: cur_input_seq,
                self.input_bboxes: cur_input_bboxes,
                self.input_values: cur_input_values,
                self.label_values: cur_label_values,
                self.label_bboxes: cur_label_bboxes
            }
            if do_train:
                self.sess.run(self.train_op, feed_dict)
                if do_save:
                    # Time-based checkpointing during long epochs.
                    if time.time() - t_start > self._save_every_secs:
                        t_start = time.time()
                        self.save_model()
            cur_summaries, lv = self.sess.run([self.summaries,
                                               self.loss_vector],
                                              feed_dict)
            # compute average summaries in batches
            for k, v in cur_summaries.items():
                summaries[k].append(v)
            losses.append(lv)
        losses = np.concatenate(losses)
        for k in summaries.keys():
            summaries[k] = np.mean(np.asarray(summaries[k]))
        if do_train and do_save:
            self.save_model()
        return summaries, losses

    def _score(self, hypotheses):
        """Feed hypotheses through the network in evaluation batches.

        Side effects: sets ``h.outputs`` (per-head predictions for this
        hypothesis) and ``h.score`` (via ``LabelStorage.instance.score``)
        on every hypothesis.
        """
        # Given a set of hypotheses, feed them to a network in batches.
        self.logger.info("Scoring %d hypotheses", len(hypotheses))
        LabelStorage.instance.get_hypo_features(hypotheses)
        for batch_start in range(0, len(hypotheses), self._eval_batch_size):
            batch_end = min(batch_start + self._eval_batch_size,
                            len(hypotheses))
            cur_input_seq = np.stack([
                hypotheses[idx].features
                for idx in range(batch_start, batch_end)])
            cur_input_bboxes = np.stack([
                hypotheses[idx].input_bboxes
                for idx in range(batch_start, batch_end)])
            cur_input_values = np.stack([
                hypotheses[idx].input_values
                for idx in range(batch_start, batch_end)])
            feed_dict = {
                self.input_seq: cur_input_seq,
                self.input_bboxes: cur_input_bboxes,
                self.input_values: cur_input_values
            }
            outputs = self.sess.run(self.outputs, feed_dict)
            for idx in range(batch_end - batch_start):
                # Slice this hypothesis' row out of every batched output.
                cur_outputs = {}
                for k, v in outputs.items():
                    cur_outputs[k] = \
                        outputs[k][idx]
                hypotheses[batch_start + idx].outputs = cur_outputs
                hypotheses[batch_start + idx].score = \
                    LabelStorage.instance.score(hypotheses[batch_start + idx],
                                                cur_outputs)

    def score(self, hypotheses):
        """Score hypotheses; in inference mode, also rescore refined boxes."""
        # Scoring hypotheses with the model.
        self._score(hypotheses)
        if self.mode == "infer":
            # During inference we take input hypothesis.
            # Then we change it according to the shifts predicted by the network.
            # Then we score it once again.
            tmp = [deepcopy(h) for h in hypotheses]
            for h, h2 in zip(hypotheses, tmp):
                # Keep the first-pass IoUs for comparison.  (Previously set
                # inside the detection loop, repeatedly, to the same value.)
                h.outputs["old_ious"] = h.outputs["ious"]
                for did in range(len(h.tracklet)):
                    if h.tracklet[did] is not None:
                        h2.tracklet[did].bbox = \
                            h.outputs["bboxes"][did].reshape((1, 4))
                        h2.tracklet[did].features = None
                # BUG FIX: delattr used to run once per non-empty detection,
                # so any tracklet with two or more detections raised
                # AttributeError on the second delete.  Clear the cached
                # hypothesis-level features exactly once so _score recomputes.
                if hasattr(h2, "features"):
                    delattr(h2, "features")
            self._score(tmp)
            for h, h2 in zip(hypotheses, tmp):
                h.outputs["ious"] = h2.outputs["ious"]
| 16,022 | 21 | 243 |
46e6f5da826b8c139db5aa3d6a375bea6c1783d2 | 701 | py | Python | multauth/api/urls.py | andrenerd/django-multiform-authentication | 4a8b94ebd660cc7afc7dcdedcc12344ef85e6615 | [
"MIT"
] | 7 | 2020-08-28T16:17:02.000Z | 2021-11-11T18:01:20.000Z | multauth/api/urls.py | andrenerd/django-multiform-authentication | 4a8b94ebd660cc7afc7dcdedcc12344ef85e6615 | [
"MIT"
] | null | null | null | multauth/api/urls.py | andrenerd/django-multiform-authentication | 4a8b94ebd660cc7afc7dcdedcc12344ef85e6615 | [
"MIT"
] | 2 | 2021-01-06T04:11:28.000Z | 2021-05-19T14:43:52.000Z | from django.urls import include, path
from .me import views as me_views
from .auth import views as auth_views
from .services import urls as services_urls
app_name = 'multauth'
# URL routes for the multauth API.
urlpatterns = [
    path('me/', me_views.MeView.as_view(), name='me'),
    path('me/password/', me_views.MePasswordView.as_view(), name='me-password'),
    path('me/passcode/', me_views.MePasscodeView.as_view(), name='me-passcode'),
    path('signin/', auth_views.SigninView.as_view(), name='signin'),
    path('signup/', auth_views.SignupView.as_view(), name='signup'),
    path('signup/verification/', auth_views.SignupVerificationView.as_view(), name='signup-verification'),
    # BUG FIX: path() matches routes literally (it is not regex-based), so
    # the previous path(r'^', ...) only matched URLs literally starting
    # with "^".  An empty prefix mounts the included service URLs here.
    path('', include(services_urls)),
]
| 35.05 | 106 | 0.71612 | from django.urls import include, path
from .me import views as me_views
from .auth import views as auth_views
from .services import urls as services_urls
app_name = 'multauth'
# URL routes for the multauth API.
urlpatterns = [
    path('me/', me_views.MeView.as_view(), name='me'),
    path('me/password/', me_views.MePasswordView.as_view(), name='me-password'),
    path('me/passcode/', me_views.MePasscodeView.as_view(), name='me-passcode'),
    path('signin/', auth_views.SigninView.as_view(), name='signin'),
    path('signup/', auth_views.SignupView.as_view(), name='signup'),
    path('signup/verification/', auth_views.SignupVerificationView.as_view(), name='signup-verification'),
    # BUG FIX: path() matches routes literally (it is not regex-based), so
    # the previous path(r'^', ...) only matched URLs literally starting
    # with "^".  An empty prefix mounts the included service URLs here.
    path('', include(services_urls)),
]
| 0 | 0 | 0 |
759471eca6eb7bbbb400247ad8d624471bce9b4f | 979 | py | Python | tests/packerlicious/test_post_processor_docker.py | gnewson/packerlicious | 9a5373bc3a63f949e7912dad0214340d5fddbd85 | [
"Apache-2.0"
] | 109 | 2017-07-17T03:32:09.000Z | 2022-02-27T18:24:18.000Z | tests/packerlicious/test_post_processor_docker.py | gnewson/packerlicious | 9a5373bc3a63f949e7912dad0214340d5fddbd85 | [
"Apache-2.0"
] | 175 | 2017-07-16T21:41:40.000Z | 2021-03-19T22:28:19.000Z | tests/packerlicious/test_post_processor_docker.py | gnewson/packerlicious | 9a5373bc3a63f949e7912dad0214340d5fddbd85 | [
"Apache-2.0"
] | 68 | 2017-07-16T20:52:38.000Z | 2022-01-08T18:24:17.000Z | import pytest
import packerlicious.post_processor as post_processor
| 23.309524 | 53 | 0.694586 | import pytest
import packerlicious.post_processor as post_processor
class TestDockerImportPostProcessor(object):
    """DockerImport must refuse serialization while required fields are unset."""

    def test_required_fields_missing(self):
        processor = post_processor.DockerImport()
        with pytest.raises(ValueError) as excinfo:
            processor.to_dict()
        message = str(excinfo.value)
        assert 'required' in message
class TestDockerPushPostProcessor(object):
    """DockerPush has no required fields, so a bare instance serializes."""

    def test_no_required_fields(self):
        processor = post_processor.DockerPush()
        processor.to_dict()
class TestDockerSavePostProcessor(object):
    """DockerSave must refuse serialization while required fields are unset."""

    def test_required_fields_missing(self):
        processor = post_processor.DockerSave()
        with pytest.raises(ValueError) as excinfo:
            processor.to_dict()
        message = str(excinfo.value)
        assert 'required' in message
class TestDockerTagPostProcessor(object):
    """DockerTag must refuse serialization while required fields are unset."""

    def test_required_fields_missing(self):
        processor = post_processor.DockerTag()
        with pytest.raises(ValueError) as excinfo:
            processor.to_dict()
        message = str(excinfo.value)
        assert 'required' in message
| 621 | 85 | 200 |
c52b8d9492fbb8787f001b52ab150ed32d5cac19 | 35 | py | Python | labs/hello_world.py | MHSRoboticsCode/2015 | 410f427439d1641146329bfdd74667054a21a658 | [
"MIT"
] | null | null | null | labs/hello_world.py | MHSRoboticsCode/2015 | 410f427439d1641146329bfdd74667054a21a658 | [
"MIT"
] | null | null | null | labs/hello_world.py | MHSRoboticsCode/2015 | 410f427439d1641146329bfdd74667054a21a658 | [
"MIT"
] | null | null | null | # 2015 lab 1
# Smoke-test script: prints a greeting to confirm the toolchain works.
print('Hello World')
| 8.75 | 20 | 0.657143 | # 2015 lab 1
print('Hello World')
| 0 | 0 | 0 |
5390903a6433e996b8622a4a7cf13953e3adb482 | 1,834 | py | Python | addons/easyship_delivery/models/easyship_service_charge.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/easyship_delivery/models/easyship_service_charge.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | addons/easyship_delivery/models/easyship_service_charge.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | 1 | 2021-05-05T07:59:08.000Z | 2021-05-05T07:59:08.000Z | from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, Warning
| 52.4 | 133 | 0.735005 | from odoo import models, fields, api, _
from odoo.exceptions import ValidationError, Warning
class EasyshipServiceCharge(models.Model):
    """One EasyShip courier quote (service + price) attached to a sale order."""
    _name = "es.service.charge"
    _description = 'EasyShip Service'
    # Cheapest and fastest quotes sort first.
    _order = 'total_charge, min_delivery_time, max_delivery_time'
    _rec_name = 'courier_name'
    @api.depends('min_delivery_time', 'max_delivery_time', 'es_service_id')
    def _compute_delivery_time(self):
        """Build the display string, e.g. "2 - 5 working days".

        NOTE(review): when either bound is falsy, delivery_time is never
        assigned; Odoo compute methods are expected to assign the field on
        every record — confirm partial data cannot occur here.
        """
        for record in self:
            if record.min_delivery_time and record.max_delivery_time:
                record.delivery_time = "%s - %s working days" % (record.min_delivery_time, record.max_delivery_time)
    # Identifier of this service on the EasyShip API side.
    es_service_id = fields.Char("EasyShip Service ID", required=True, copy=False)
    courier_name = fields.Char("Service", required=True, copy=False)
    min_delivery_time = fields.Char("Min Delivery Time", copy=False)
    max_delivery_time = fields.Char("Max Delivery Time", copy=False)
    delivery_time = fields.Char("Delivery Time", compute="_compute_delivery_time", store=True)
    shipment_charge = fields.Monetary("Shipping Cost", currency_field='currency_id', copy=False)
    insurance_fee = fields.Monetary("Insurance Fee", currency_field='currency_id', copy=False)
    # Presumably shipment_charge + insurance_fee as returned by EasyShip —
    # the sum is not computed here, so confirm against the rates endpoint.
    total_charge = fields.Monetary("Total Charge", currency_field='currency_id', copy=False)
    order_id = fields.Many2one("sale.order", string="Order", copy=False)
    currency_id = fields.Many2one(related='order_id.currency_id', depends=['order_id'], store=True, string='Currency', readonly=True)
    courier_does_pickup = fields.Boolean('Courier Does Pickup')
    def set_delivery_line(self):
        """Apply this quote to the parent sale order as its delivery cost."""
        self.ensure_one()
        self.order_id.delivery_rating_success = True
        self.order_id.delivery_price = self.total_charge
        self.order_id.es_service_id = self.id
        self.order_id.set_delivery_line()
| 458 | 1,259 | 23 |
4ba48eedc7d1435806e6452e35a6fdf621660ae9 | 34,930 | py | Python | contents/character/generator/CharacterOccupation.py | jakenjarvis/Lakshmi | de805f7488c1a6b3a4e0d3804be7ecd6c814b446 | [
"Apache-2.0"
] | 1 | 2020-08-24T01:31:20.000Z | 2020-08-24T01:31:20.000Z | contents/character/generator/CharacterOccupation.py | jakenjarvis/Lakshmi | de805f7488c1a6b3a4e0d3804be7ecd6c814b446 | [
"Apache-2.0"
] | null | null | null | contents/character/generator/CharacterOccupation.py | jakenjarvis/Lakshmi | de805f7488c1a6b3a4e0d3804be7ecd6c814b446 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List, Dict
import math
import random
import aiohttp
import asyncio
import discord
from discord.ext import commands, tasks
from contents.character.Investigator import Investigator
| 50.696662 | 191 | 0.378586 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List, Dict
import math
import random
import aiohttp
import asyncio
import discord
from discord.ext import commands, tasks
from contents.character.Investigator import Investigator
class CharacterOccupation():
def __init__(self, occupation: str):
# 職業
self.occupation = occupation
# 職業別データ
self.occupations = {
"doctor_of_medicine" : {
"basename" : "医師",
"confirmed_list" : ["medicine", "first_aid", "credit_rating", "psychology", "psychoanalysis", "biology", "other_language(ラテン語)", "pharmacy"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"selfish", # 自己利益優先的な性格
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"nervous", # 神経質で臆病な性格
"polite", # 礼儀正しく丁寧な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"gentle", # 穏やかで優しい性格
"honest", # 正直で嘘をつかない誠実な性格
"lazyloose", # 怠惰でだらしない性格
"entertaining", # 人を楽しませるような面白い性格
"evilchildish", # 意地悪で子供っぽい性格
"talkative", # おしゃべりで口数が多い性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"engineer" : {
"basename" : "エンジニア",
"confirmed_list" : ["chemistry", "mech_repair", "opr_hvy_machine", "electr_repair", "geology", "library_use", "physics"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"serious", # 真面目で少々固い性格
"honest", # 正直で嘘をつかない誠実な性格
"lazyloose", # 怠惰でだらしない性格
"nervous", # 神経質で臆病な性格
"polite", # 礼儀正しく丁寧な性格
"jealous", # 嫉妬しやすい性格
"responsible", # 責任感のある性格
"sensitive", # 繊細で傷つきやすい性格
"gentle", # 穏やかで優しい性格
"bravepatient", # 勇敢で我慢強い性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"entertainer" : {
"basename" : "エンターテイナー",
"confirmed_list" : ["fast_talk", "dodge", "listen", "art(*)", "credit_rating", "psychology", "disguise"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"jealous", # 嫉妬しやすい性格
"polite", # 礼儀正しく丁寧な性格
"selfish", # 自己利益優先的な性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"bravepatient", # 勇敢で我慢強い性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"serious", # 真面目で少々固い性格
"honest", # 正直で嘘をつかない誠実な性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"professor" : {
"basename" : "教授",
"confirmed_list" : ["credit_rating", "psychology", "persuade", "library_use", "bargain", "other_language(*)"],
"2_choice_skills" : ["medicine", "chemistry", "archeology", "anthropology", "biology", "geology", "electronics", "astronomy", "natural_history", "physics", "law", "history"],
"undetermined_skills": 0,
"personalitys": [
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"serious", # 真面目で少々固い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"lazyloose", # 怠惰でだらしない性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"responsible", # 責任感のある性格
"bravepatient", # 勇敢で我慢強い性格
"honest", # 正直で嘘をつかない誠実な性格
"evilchildish", # 意地悪で子供っぽい性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"zealot" : {
"basename" : "狂信者",
"confirmed_list" : ["conceal", "hide", "psychology", "persuade", "library_use"],
"2_choice_skills" : ["chemistry", "electr_repair", "law", "pharmacy", "rifle"],
"undetermined_skills": 1,
"personalitys": [
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"honest", # 正直で嘘をつかない誠実な性格
"bravepatient", # 勇敢で我慢強い性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"evilchildish", # 意地悪で子供っぽい性格
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"lazyloose", # 怠惰でだらしない性格
"selfish", # 自己利益優先的な性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"military_officer" : {
"basename" : "軍仕官",
"confirmed_list" : ["accounting", "credit_rating", "psychology", "persuade", "navigate", "bargain", "law"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"responsible", # 責任感のある性格
"bravepatient", # 勇敢で我慢強い性格
"polite", # 礼儀正しく丁寧な性格
"serious", # 真面目で少々固い性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"honest", # 正直で嘘をつかない誠実な性格
"gentle", # 穏やかで優しい性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"policeman" : {
"basename" : "警官",
"confirmed_list" : ["fast_talk", "first_aid", "dodge", "grapple", "psychology", "law"],
"2_choice_skills" : ["drive(自動車)", "ride", "bargain", "martial_arts", "spot_hidden"],
"undetermined_skills": 0,
"personalitys": [
"bravepatient", # 勇敢で我慢強い性格
"responsible", # 責任感のある性格
"polite", # 礼儀正しく丁寧な性格
"serious", # 真面目で少々固い性格
"honest", # 正直で嘘をつかない誠実な性格
"gentle", # 穏やかで優しい性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"police_detective" : {
"basename" : "刑事",
"confirmed_list" : ["fast_talk", "listen", "psychology", "persuade", "bargain", "law", "spot_hidden"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"responsible", # 責任感のある性格
"polite", # 礼儀正しく丁寧な性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"honest", # 正直で嘘をつかない誠実な性格
"gentle", # 穏やかで優しい性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"artist" : {
"basename" : "芸術家",
"confirmed_list" : ["fast_talk", "art(*)", "photography", "psychology", "craft(*)", "spot_hidden", "history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"evilchildish", # 意地悪で子供っぽい性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"gentle", # 穏やかで優しい性格
"lazyloose", # 怠惰でだらしない性格
"honest", # 正直で嘘をつかない誠実な性格
"selfish", # 自己利益優先的な性格
"serious", # 真面目で少々固い性格
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"bravepatient", # 勇敢で我慢強い性格
"polite", # 礼儀正しく丁寧な性格
"responsible", # 責任感のある性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"antiquarian" : {
"basename" : "古物研究家",
"confirmed_list" : ["art(*)", "craft(*)", "library_use", "bargain", "other_language(*)", "spot_hidden", "history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"jealous", # 嫉妬しやすい性格
"selfish", # 自己利益優先的な性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"serious", # 真面目で少々固い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"lazyloose", # 怠惰でだらしない性格
"bravepatient", # 勇敢で我慢強い性格
"honest", # 正直で嘘をつかない誠実な性格
"evilchildish", # 意地悪で子供っぽい性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"author" : {
"basename" : "作家",
"confirmed_list" : ["occult", "psychology", "persuade", "library_use", "other_language(*)", "own_language(*)", "history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"nervous", # 神経質で臆病な性格
"evilchildish", # 意地悪で子供っぽい性格
"sensitive", # 繊細で傷つきやすい性格
"jealous", # 嫉妬しやすい性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"selfish", # 自己利益優先的な性格
"lazyloose", # 怠惰でだらしない性格
"polite", # 礼儀正しく丁寧な性格
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"honest", # 正直で嘘をつかない誠実な性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"journalist" : {
"basename" : "ジャーナリスト",
"confirmed_list" : ["fast_talk", "photography", "psychology", "persuade", "library_use", "own_language(*)", "history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"bravepatient", # 勇敢で我慢強い性格
"responsible", # 責任感のある性格
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"serious", # 真面目で少々固い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"lazyloose", # 怠惰でだらしない性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"honest", # 正直で嘘をつかない誠実な性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"private_investigator" : {
"basename" : "私立探偵",
"confirmed_list" : ["fast_talk", "locksmith", "photography", "psychology", "library_use", "bargain", "law"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"selfish", # 自己利益優先的な性格
"responsible", # 責任感のある性格
"polite", # 礼儀正しく丁寧な性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"honest", # 正直で嘘をつかない誠実な性格
"gentle", # 穏やかで優しい性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"spokesperson" : {
"basename" : "スポークスマン",
"confirmed_list" : ["fast_talk", "dodge", "credit_rating", "psychology", "persuade", "disguise"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"talkative", # おしゃべりで口数が多い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"honest", # 正直で嘘をつかない誠実な性格
"selfish", # 自己利益優先的な性格
"entertaining", # 人を楽しませるような面白い性格
"lazyloose", # 怠惰でだらしない性格
"evilchildish", # 意地悪で子供っぽい性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"athlete" : {
"basename" : "スポーツ選手",
"confirmed_list" : ["dodge", "ride", "swim", "jump", "throw", "climb", "martial_arts"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"bravepatient", # 勇敢で我慢強い性格
"selfish", # 自己利益優先的な性格
"entertaining", # 人を楽しませるような面白い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"talkative", # おしゃべりで口数が多い性格
"responsible", # 責任感のある性格
"honest", # 正直で嘘をつかない誠実な性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"serious", # 真面目で少々固い性格
"jealous", # 嫉妬しやすい性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"clergyman" : {
"basename" : "聖職者",
"confirmed_list" : ["listen", "accounting", "psychology", "persuade", "library_use", "other_language(*)", "history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"honest", # 正直で嘘をつかない誠実な性格
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"nervous", # 神経質で臆病な性格
"jealous", # 嫉妬しやすい性格
"sensitive", # 繊細で傷つきやすい性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"evilchildish", # 意地悪で子供っぽい性格
"bravepatient", # 勇敢で我慢強い性格
"selfish", # 自己利益優先的な性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"parapsychologist" : {
"basename" : "超心理学者",
"confirmed_list" : ["occult", "anthropology", "photography", "psychology", "library_use", "history", "other_language(*)"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"serious", # 真面目で少々固い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"lazyloose", # 怠惰でだらしない性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"responsible", # 責任感のある性格
"bravepatient", # 勇敢で我慢強い性格
"honest", # 正直で嘘をつかない誠実な性格
"evilchildish", # 意地悪で子供っぽい性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"dilettante" : {
"basename" : "ディレッタント",
"confirmed_list" : ["art(*)", "ride", "shotgun", "credit_rating", "craft(*)", "other_language(*)"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"nervous", # 神経質で臆病な性格
"selfish", # 自己利益優先的な性格
"sensitive", # 繊細で傷つきやすい性格
"jealous", # 嫉妬しやすい性格
"talkative", # おしゃべりで口数が多い性格
"polite", # 礼儀正しく丁寧な性格
"entertaining", # 人を楽しませるような面白い性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"evilchildish", # 意地悪で子供っぽい性格
"honest", # 正直で嘘をつかない誠実な性格
"bravepatient", # 勇敢で我慢強い性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"missionary" : {
"basename" : "伝道者",
"confirmed_list" : ["medicine", "first_aid", "mech_repair", "art(*)", "persuade", "natural_history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"honest", # 正直で嘘をつかない誠実な性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"responsible", # 責任感のある性格
"bravepatient", # 勇敢で我慢強い性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"serious", # 真面目で少々固い性格
"jealous", # 嫉妬しやすい性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"tribal_member" : {
"basename" : "トライブ・メンバー",
"confirmed_list" : ["occult", "listen", "swim", "throw", "bargain", "natural_history", "spot_hidden"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"selfish", # 自己利益優先的な性格
"responsible", # 責任感のある性格
"entertaining", # 人を楽しませるような面白い性格
"bravepatient", # 勇敢で我慢強い性格
"polite", # 礼儀正しく丁寧な性格
"gentle", # 穏やかで優しい性格
"honest", # 正直で嘘をつかない誠実な性格
"serious", # 真面目で少々固い性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"jealous", # 嫉妬しやすい性格
"talkative", # おしゃべりで口数が多い性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"farmer_forester" : {
"basename" : "農林業作業者",
"confirmed_list" : ["first_aid", "mech_repair", "opr_hvy_machine", "craft(*)", "track", "electr_repair", "natural_history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"bravepatient", # 勇敢で我慢強い性格
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"honest", # 正直で嘘をつかない誠実な性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"pilot" : {
"basename" : "パイロット",
"confirmed_list" : ["mech_repair", "opr_hvy_machine", "electr_repair", "pilot(*)", "astronomy", "navigate", "physics"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"responsible", # 責任感のある性格
"serious", # 真面目で少々固い性格
"talkative", # おしゃべりで口数が多い性格
"honest", # 正直で嘘をつかない誠実な性格
"nervous", # 神経質で臆病な性格
"bravepatient", # 勇敢で我慢強い性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"entertaining", # 人を楽しませるような面白い性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"hacker_consultant" : {
"basename" : "ハッカー/コンサルタント",
"confirmed_list" : ["fast_talk", "computer", "electr_repair", "electronics", "library_use", "psychology", "other_language(*)"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"jealous", # 嫉妬しやすい性格
"lazyloose", # 怠惰でだらしない性格
"serious", # 真面目で少々固い性格
"entertaining", # 人を楽しませるような面白い性格
"responsible", # 責任感のある性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"honest", # 正直で嘘をつかない誠実な性格
"talkative", # おしゃべりで口数が多い性格
"bravepatient", # 勇敢で我慢強い性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"criminal" : {
"basename" : "犯罪者",
"confirmed_list" : ["fast_talk", "locksmith", "handgun", "sneak", "bargain", "disguise", "spot_hidden"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"lazyloose", # 怠惰でだらしない性格
"evilchildish", # 意地悪で子供っぽい性格
"selfish", # 自己利益優先的な性格
"talkative", # おしゃべりで口数が多い性格
"bravepatient", # 勇敢で我慢強い性格
"jealous", # 嫉妬しやすい性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"gentle", # 穏やかで優しい性格
"polite", # 礼儀正しく丁寧な性格
"serious", # 真面目で少々固い性格
"entertaining", # 人を楽しませるような面白い性格
"responsible", # 責任感のある性格
"honest", # 正直で嘘をつかない誠実な性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"soldier" : {
"basename" : "兵士",
"confirmed_list" : ["first_aid", "dodge", "conceal", "mech_repair", "listen", "sneak", "rifle"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"bravepatient", # 勇敢で我慢強い性格
"responsible", # 責任感のある性格
"polite", # 礼儀正しく丁寧な性格
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"nervous", # 神経質で臆病な性格
"serious", # 真面目で少々固い性格
"honest", # 正直で嘘をつかない誠実な性格
"jealous", # 嫉妬しやすい性格
"gentle", # 穏やかで優しい性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"lawyer" : {
"basename" : "弁護士",
"confirmed_list" : ["fast_talk", "credit_rating", "psychology", "persuade", "library_use", "bargain", "law"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"polite", # 礼儀正しく丁寧な性格
"talkative", # おしゃべりで口数が多い性格
"selfish", # 自己利益優先的な性格
"serious", # 真面目で少々固い性格
"responsible", # 責任感のある性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"honest", # 正直で嘘をつかない誠実な性格
"entertaining", # 人を楽しませるような面白い性格
"gentle", # 穏やかで優しい性格
"jealous", # 嫉妬しやすい性格
"bravepatient", # 勇敢で我慢強い性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"drifter" : {
"basename" : "放浪者",
"confirmed_list" : ["fast_talk", "hide", "listen", "sneak", "psychology", "bargain", "natural_history"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"lazyloose", # 怠惰でだらしない性格
"honest", # 正直で嘘をつかない誠実な性格
"nervous", # 神経質で臆病な性格
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"talkative", # おしゃべりで口数が多い性格
"entertaining", # 人を楽しませるような面白い性格
"evilchildish", # 意地悪で子供っぽい性格
"sensitive", # 繊細で傷つきやすい性格
"gentle", # 穏やかで優しい性格
"serious", # 真面目で少々固い性格
"polite", # 礼儀正しく丁寧な性格
"bravepatient", # 勇敢で我慢強い性格
"responsible", # 責任感のある性格
"unique", # とても変わった例えようのない珍しい性格
],
},
"musician" : {
"basename" : "ミュージシャン",
"confirmed_list" : ["fast_talk", "listen", "art(*)", "craft(*)", "persuade", "psychology", "bargain"],
"2_choice_skills" : [],
"undetermined_skills": 1,
"personalitys": [
"entertaining", # 人を楽しませるような面白い性格
"talkative", # おしゃべりで口数が多い性格
"nervous", # 神経質で臆病な性格
"sensitive", # 繊細で傷つきやすい性格
"selfish", # 自己利益優先的な性格
"jealous", # 嫉妬しやすい性格
"evilchildish", # 意地悪で子供っぽい性格
"lazyloose", # 怠惰でだらしない性格
"gentle", # 穏やかで優しい性格
"bravepatient", # 勇敢で我慢強い性格
"polite", # 礼儀正しく丁寧な性格
"responsible", # 責任感のある性格
"honest", # 正直で嘘をつかない誠実な性格
"serious", # 真面目で少々固い性格
"unique", # とても変わった例えようのない珍しい性格
],
},
}
    def get_key(self) -> str:
        """Return the machine-readable occupation key (index into ``occupations``)."""
        return self.occupation
def get_name(self) -> str:
return self.occupations[self.occupation]["basename"]
def get_confirmed_list(self) -> List[str]:
return self.occupations[self.occupation]["confirmed_list"]
def get_2_choice_skills(self) -> List[str]:
return self.occupations[self.occupation]["2_choice_skills"]
def get_undetermined_skills(self) -> int:
return self.occupations[self.occupation]["undetermined_skills"]
def choice_personality(self, occupation: str) -> str:
# 職業別の性格リストから上位の物を優先的に選択する。
targetlist = self.occupations[occupation]["personalitys"]
weights = list(reversed([math.ceil(_ / 1) for _ in range(1, len(targetlist)+1)]))
return random.choices(targetlist, weights=weights, k=1)[0]
| 44,066 | 7 | 226 |
16dedf6977b981f036df8a226d0c212ce61fd47e | 4,570 | py | Python | src/passutil/pu.py | aaronstanek/password-generator | 68f6f2ea1721a2ed52333eff842580db3b0a5307 | [
"MIT"
] | 3 | 2020-08-16T22:55:38.000Z | 2022-01-24T23:31:01.000Z | src/passutil/pu.py | aaronstanek/password-generator | 68f6f2ea1721a2ed52333eff842580db3b0a5307 | [
"MIT"
] | null | null | null | src/passutil/pu.py | aaronstanek/password-generator | 68f6f2ea1721a2ed52333eff842580db3b0a5307 | [
"MIT"
] | null | null | null | # Copyright Aaron Stanek 2021
# See LICENSE for more details
import sys
# Fail fast: the secrets module and other features used below require 3.6+.
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
    raise Exception("Python Password Utility requires Python 3.6 or later. Compatibility with any major versions after Python 3 is not guaranteed.")
import hashlib
import secrets
import time
from .chars import normalize_valid_chars, create_character_map
# try to use SHA-3 if possible
# default to SHA-2 if you have to
# SHA512(x) returns a 64-byte digest; SHA512_number records which family
# (3 for SHA-3, 2 for SHA-2) was selected at import time.
if "sha3_512" in hashlib.algorithms_available:
    SHA512 = lambda x : hashlib.sha3_512(x).digest()
    SHA512_number = 3
else:
    SHA512 = lambda x : hashlib.sha512(x).digest()
    SHA512_number = 2
# this class is used to guarantee
# that the input to every hash
# is different
# NOTE(review): the three comment lines above describe a UniqueCounter class
# that is not present in this copy of the file -- confirm against upstream.
| 40.803571 | 149 | 0.645733 | # Copyright Aaron Stanek 2021
# See LICENSE for more details
import sys
# Fail fast: the secrets module and other features used below require 3.6+.
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
    raise Exception("Python Password Utility requires Python 3.6 or later. Compatibility with any major versions after Python 3 is not guaranteed.")
import hashlib
import secrets
import time
from .chars import normalize_valid_chars, create_character_map
# try to use SHA-3 if possible
# default to SHA-2 if you have to
# SHA512(x) returns a 64-byte digest; SHA512_number records which family
# (3 for SHA-3, 2 for SHA-2) was selected at import time.
if "sha3_512" in hashlib.algorithms_available:
    SHA512 = lambda x : hashlib.sha3_512(x).digest()
    SHA512_number = 3
else:
    SHA512 = lambda x : hashlib.sha512(x).digest()
    SHA512_number = 2
class UniqueCounter(object):
    """Monotonic counter guaranteeing a distinct bytes token on every call.

    Used to make sure the input to every hash in this module is different:
    each call returns the decimal counter value followed by ":" encoded as
    bytes, then increments the counter. The counter is seeded with a
    uniformly random 128-bit integer.
    """
    def __init__(self):
        # Seed the internal state with a random integer 0 <= n < 2**128.
        # secrets.randbits(128) draws the same distribution as the original
        # 16-iteration randbelow(256) accumulation, in one CSPRNG call.
        self.n = secrets.randbits(128)
    def __call__(self):
        """Return the current state as b"<decimal>:" and increment it."""
        token = str(self.n) + ":"
        self.n += 1
        return token.encode("UTF-8")
def time_hash():
    """Hash of the current wall-clock time, including sub-precision garbage.

    The time is formatted with 20 decimal places so the digest also covers
    bits below the clock's real resolution.
    """
    stamp = "{:1.20f}".format(time.time())
    return SHA512(stamp.encode("UTF-8"))
def generate_password(length,key,valid_chars):
    """Generate a *length*-character password using characters from *valid_chars*.

    The caller-supplied *key* is mixed with OS randomness (``secrets``) and
    wall-clock jitter through repeated SHA-512 hashing; digest bytes are then
    mapped onto the allowed character set by rejection sampling, so every
    allowed character is equally likely.

    Parameters:
        length      -- nonnegative int, number of password characters.
        key         -- nonempty bytes (a str is encoded as UTF-8).
        valid_chars -- character-set specifier understood by
                       normalize_valid_chars().

    Returns the password as a str.
    Raises TypeError or ValueError for invalid arguments.
    """
    if type(length) != int:
        raise TypeError("length parameter must be int")
    if length < 0:
        raise ValueError("length parameter must be nonnegative")
    if type(key) != bytes:
        if type(key) == str:
            key = key.encode("UTF-8")
        else:
            raise TypeError("key parameter must be bytes or str")
    if len(key) < 1:
        raise ValueError("key parameter has minimum length 1")
    valid_chars = normalize_valid_chars(valid_chars)
    if len(valid_chars) < 1:
        raise ValueError("valid_chars parameter has minimum size 1")
    char_map = create_character_map(valid_chars)
    # length is a nonnegative integer
    # key is a nonempty bytes object
    # valid_chars is a nonempty set(int)
    # it indicates which characters are allowed to be in the
    # password, uses ascii codes
    # char_map is a list of length 256
    # it maps indicies to characters in valid_chars
    # or to None
    # SHA512 has an output size of 64 bytes
    garbage = SHA512( b'initialize:' + key )
    # garbage holds the state of the password generator
    # it is called garbage because, while deterministicly generated,
    # it should not have any sensible interpretation
    counter = UniqueCounter()
    for i in range(3):
        # tumble the bits around
        # but don't extract any password characters yet
        garbage = SHA512( b'prefix:' + counter() + garbage + time_hash() + secrets.token_bytes(64) + key )
    # the value of garbage should be sufficiently random at this point,
    # totally disconnected from the input values
    password = [] # store it as a list of ascii values, convert to a string later
    while len(password) < length: # this is the password generation loop
        # update garbage
        garbage = SHA512( b'step:' + counter() + garbage + time_hash() + secrets.token_bytes(64) + key )
        # use garbage to generate another sequence of bytes which will not
        # have any effect on future values of garbage
        candidate = SHA512( b'output:' + counter() + garbage + time_hash() + secrets.token_bytes(64) )
        # candidate should have nothing in common with future or past values of garbage
        # select a single value from those bytes
        value = candidate[secrets.randbelow(len(candidate))]
        # predicting value is very very difficult
        # determining garbage from value requires inverting
        # a SHA512 hash (a hash which isn't even known to
        # a potential adversary because it never leaves this function)
        # determing a value from the values before and after it
        # requires at least partial knowledge of garbage
        # now convert value to a usable character
        value = char_map[value]
        # value is now a valid character codepoint
        # or None (rejection sampling keeps the output distribution uniform)
        if value is not None:
            password.append(value)
    # convert to a string
    return bytes(password).decode("UTF-8")
| 3,664 | 7 | 129 |
632ddc4d9d2feb4191d7326623588355a5544aa5 | 12,824 | py | Python | dl_multi/archive/tfrecord.py | wbrandenburger/MTPIA | 02c773ce60b7efd5b15f270f047a6da5a8f00b7e | [
"MIT"
] | 1 | 2020-04-14T10:19:37.000Z | 2020-04-14T10:19:37.000Z | dl_multi/archive/tfrecord.py | wbrandenburger/MTPIA | 02c773ce60b7efd5b15f270f047a6da5a8f00b7e | [
"MIT"
] | null | null | null | dl_multi/archive/tfrecord.py | wbrandenburger/MTPIA | 02c773ce60b7efd5b15f270f047a6da5a8f00b7e | [
"MIT"
] | null | null | null | # ===========================================================================
# tfrecords_utils.py-------------------------------------------------------
# ===========================================================================
""" The following functions can be used to convert a value to a type compatible with tf.Example.
The tf.train.Feature message type can accept one of the following three types. Most other generic types can be coerced into one of these:
tf.train.BytesList : string / byte
tf.train.FloatList : float (float32) / double (float64)
tf.train.Int64List : bool / enum / int32 / uint32 / int64 / uint64
In order to convert a standard TensorFlow type to a tf.Example-compatible tf.train.Feature, you can use the shortcut functions below. Note that each function takes a scalar input value and returns a tf.train.Feature containing one of the three list types above.
"""
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
from dl_multi.__init__ import _logger
import dl_multi.utils.general as glu
import dl_multi.utils.imgio
from dl_multi.utils import imgtools
import numpy as np
import pathlib
import tensorflow as tf
import tifffile
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _bytes_feature(value, serialize=False):
    """Wrap a string / byte scalar in a ``tf.train.Feature`` (BytesList).

    Parameters
    ----------
    value : string / byte
        Scalar to wrap; an EagerTensor is unwrapped to its numpy value first.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    # BytesList won't unpack a string from an EagerTensor, so unwrap first.
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    wrapped = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _float_feature(value, serialize=False):
    """Wrap a float / double scalar in a ``tf.train.Feature`` (FloatList).

    Parameters
    ----------
    value : float / double
        Scalar to wrap.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    wrapped = tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _int64_feature(value, serialize=False):
    """Wrap a bool / enum / int / uint scalar in a ``tf.train.Feature`` (Int64List).

    Parameters
    ----------
    value : bool / enum / int / uint
        Scalar to wrap.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    wrapped = tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# Create a dictionary describing the features. The key of the dict should be the same with the key in writing function.
_feature_specs = {
    # Parsing schema: these keys must match the keys written by write_tfrecord().
    "features" : {
        "rows": tf.io.FixedLenFeature([], tf.int64),
        "cols": tf.io.FixedLenFeature([], tf.int64),
        "image": tf.io.FixedLenFeature([], tf.string),
        "height": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.string)
    },
    # Per-image decoding info: which raw byte feature to decode, how to
    # reinterpret it (channel count, element dtype), and the file extension
    # used when dumping it back to disk.
    "images" : [
        {"spec": "image", "channels": 3, "type" : tf.uint8, "ext": ".tif"},
        {"spec": "height", "channels": 1, "type" : tf.float32, "ext": ".tif"},
        {"spec": "label", "channels": 1, "type" : tf.uint8, "ext": ".tif"}
    ]
}
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def write_old_tfrecord(files, param_specs, param_tfrecord, param_label=dict()):
    """Legacy tfrecord writer kept for reference.

    NOTE(review): the actual serialization code below is commented out, so at
    the moment this function only resolves the input data sets and prints the
    path of every item spec. Prefer write_tfrecord(). The mutable default
    ``param_label=dict()`` is never mutated here, but is worth fixing upstream.
    """
    _logger.debug("Start creation of tfrecords with settings:\nparam_tfrecord:\t{}\nparam_label:\t{}".format(param_tfrecord, param_label))
    # settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    img_in = dl_multi.utils.imgio.get_data(files, param_specs, param_label=param_label)
    tfrecord_file = glu.Folder().set_folder(**param_tfrecord["tfrecord"])
    # execution -----------------------------------------------------------
    # -----------------------------------------------------------------------
    _logger.debug("[SAVE] '{}'".format(tfrecord_file))
    with tf.io.TFRecordWriter(tfrecord_file) as writer:
        for item in iter(img_in):
            for item_spec in iter(item):
                print(item_spec.path)
                # img = item.spec("image").data
                # tf_example = get_tfrecord_features(
                #     img.shape,
                #     img.tostring(),
                #     item.spec("height").data.tostring(),
                #     imgtools.labels_to_image(item.spec("label").data, param_label).tostring()
                # )
                # writer.write(tf_example.SerializeToString())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def write_tfrecord(files, param_specs, param_tfrecord, param_label=dict()):
    """Serialize all data sets referenced by *files* into one TFRecord file."""
    _logger.debug("Start creation of tfrecords with settings:\nparam_tfrecord:\t{}\nparam_label:\t{}".format(param_tfrecord, param_label))
    # settings --------------------------------------------------------------
    data_sets = dl_multi.utils.imgio.get_data(files, param_specs, param_label=param_label)
    tfrecord_file = glu.Folder().set_folder(**param_tfrecord["tfrecord"])
    # execution -------------------------------------------------------------
    _logger.debug("[SAVE] '{}'".format(tfrecord_file))
    with tf.io.TFRecordWriter(tfrecord_file) as writer:
        for record in iter(data_sets):
            # Image dimensions first; keys must match the reading feature spec.
            image_shape = record.spec("image").data.shape
            feature = {"rows": _int64_feature(image_shape[0]),
                       "cols": _int64_feature(image_shape[1])}
            # One raw-bytes feature per spec ("image"/"height"/"label").
            for entry in iter(record):
                feature[entry.spec] = _bytes_feature(entry.data.tostring())
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_tfrecord_features(shape, image_string, height_string, mask_string):
    """Build a tf.train.Example describing one image/height/label triple.

    Keys must match the ones expected by the reading code.
    """
    feature_map = dict(
        rows=_int64_feature(shape[0]),
        cols=_int64_feature(shape[1]),
        image=_bytes_feature(image_string),
        height=_bytes_feature(height_string),
        label=_bytes_feature(mask_string),
    )
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def read_tfrecord_attempt(tfrecord_queue):
    """Return image/annotation tensors that are created by reading tfrecord file.

    The function accepts a tfrecord filenames queue as input, usually created
    with tf.train.string_input_producer() where the filename is specified with
    the desired number of epochs. It takes the queue produced by the
    aforementioned tf.train.string_input_producer() and defines tensors
    converted from raw binary representations into reshaped image/annotation
    tensors. (Legacy TF1 queue-based API.)

    Parameters
    ----------
    tfrecord_queue : tfrecord filename queue
        String queue object from tf.train.string_input_producer()

    Returns
    -------
    image, annotation : tuple of tf.int32 (image, annotation)
        Tuple of image/annotation tensors
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(tfrecord_queue)
    # Schema here differs from _feature_specs: explicit height/width ints plus
    # raw image ("data_raw", float32, 4 channels) and mask ("mask_raw", uint8).
    features = tf.io.parse_single_example(
        serialized_example,
        features={
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'data_raw': tf.io.FixedLenFeature([], tf.string),
            'mask_raw': tf.io.FixedLenFeature([], tf.string)
        }
    )
    image = tf.decode_raw(features['data_raw'], tf.float32)
    annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    image_shape = tf.stack([height, width, 4])
    annotation_shape = tf.stack([height, width, 1])
    image = tf.reshape(image, image_shape)
    annotation = tf.reshape(annotation, annotation_shape)
return image, annotation | 42.889632 | 265 | 0.523316 | # ===========================================================================
# tfrecords_utils.py-------------------------------------------------------
# ===========================================================================
""" The following functions can be used to convert a value to a type compatible with tf.Example.
The tf.train.Feature message type can accept one of the following three types. Most other generic types can be coerced into one of these:
tf.train.BytesList : string / byte
tf.train.FloatList : float (float32) / double (float64)
tf.train.Int64List : bool / enum / int32 / uint32 / int64 / uint64
In order to convert a standard TensorFlow type to a tf.Example-compatible tf.train.Feature, you can use the shortcut functions below. Note that each function takes a scalar input value and returns a tf.train.Feature containing one of the three list types above.
"""
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
from dl_multi.__init__ import _logger
import dl_multi.utils.general as glu
import dl_multi.utils.imgio
from dl_multi.utils import imgtools
import numpy as np
import pathlib
import tensorflow as tf
import tifffile
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _bytes_feature(value, serialize=False):
    """Wrap a string / byte scalar in a ``tf.train.Feature`` (BytesList).

    Parameters
    ----------
    value : string / byte
        Scalar to wrap; an EagerTensor is unwrapped to its numpy value first.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    # BytesList won't unpack a string from an EagerTensor, so unwrap first.
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()
    wrapped = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _float_feature(value, serialize=False):
    """Wrap a float / double scalar in a ``tf.train.Feature`` (FloatList).

    Parameters
    ----------
    value : float / double
        Scalar to wrap.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    wrapped = tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def _int64_feature(value, serialize=False):
    """Wrap a bool / enum / int / uint scalar in a ``tf.train.Feature`` (Int64List).

    Parameters
    ----------
    value : bool / enum / int / uint
        Scalar to wrap.
    serialize : bool
        When True, return the serialized protobuf string instead of the
        Feature object.

    Returns
    -------
    tf.train.Feature or bytes
        Value converted to a form compatible with ``tf.Example``.
    """
    wrapped = tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    if serialize:
        return wrapped.SerializeToString()
    return wrapped
# Create a dictionary describing the features. The key of the dict should be the same with the key in writing function.
_feature_specs = {
    # Parsing schema: these keys must match the keys written by write_tfrecord().
    "features" : {
        "rows": tf.io.FixedLenFeature([], tf.int64),
        "cols": tf.io.FixedLenFeature([], tf.int64),
        "image": tf.io.FixedLenFeature([], tf.string),
        "height": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.string)
    },
    # Per-image decoding info: which raw byte feature to decode, how to
    # reinterpret it (channel count, element dtype), and the file extension
    # used when dumping it back to disk.
    "images" : [
        {"spec": "image", "channels": 3, "type" : tf.uint8, "ext": ".tif"},
        {"spec": "height", "channels": 1, "type" : tf.float32, "ext": ".tif"},
        {"spec": "label", "channels": 1, "type" : tf.uint8, "ext": ".tif"}
    ]
}
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def write_old_tfrecord(files, param_specs, param_tfrecord, param_label=dict()):
    """Legacy tfrecord writer kept for reference.

    NOTE(review): the actual serialization code below is commented out, so at
    the moment this function only resolves the input data sets and prints the
    path of every item spec. Prefer write_tfrecord(). The mutable default
    ``param_label=dict()`` is never mutated here, but is worth fixing upstream.
    """
    _logger.debug("Start creation of tfrecords with settings:\nparam_tfrecord:\t{}\nparam_label:\t{}".format(param_tfrecord, param_label))
    # settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    img_in = dl_multi.utils.imgio.get_data(files, param_specs, param_label=param_label)
    tfrecord_file = glu.Folder().set_folder(**param_tfrecord["tfrecord"])
    # execution -----------------------------------------------------------
    # -----------------------------------------------------------------------
    _logger.debug("[SAVE] '{}'".format(tfrecord_file))
    with tf.io.TFRecordWriter(tfrecord_file) as writer:
        for item in iter(img_in):
            for item_spec in iter(item):
                print(item_spec.path)
                # img = item.spec("image").data
                # tf_example = get_tfrecord_features(
                #     img.shape,
                #     img.tostring(),
                #     item.spec("height").data.tostring(),
                #     imgtools.labels_to_image(item.spec("label").data, param_label).tostring()
                # )
                # writer.write(tf_example.SerializeToString())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def write_tfrecord(files, param_specs, param_tfrecord, param_label=dict()):
    """Serialize all data sets referenced by *files* into one TFRecord file."""
    _logger.debug("Start creation of tfrecords with settings:\nparam_tfrecord:\t{}\nparam_label:\t{}".format(param_tfrecord, param_label))
    # settings --------------------------------------------------------------
    data_sets = dl_multi.utils.imgio.get_data(files, param_specs, param_label=param_label)
    tfrecord_file = glu.Folder().set_folder(**param_tfrecord["tfrecord"])
    # execution -------------------------------------------------------------
    _logger.debug("[SAVE] '{}'".format(tfrecord_file))
    with tf.io.TFRecordWriter(tfrecord_file) as writer:
        for record in iter(data_sets):
            # Image dimensions first; keys must match the reading feature spec.
            image_shape = record.spec("image").data.shape
            feature = {"rows": _int64_feature(image_shape[0]),
                       "cols": _int64_feature(image_shape[1])}
            # One raw-bytes feature per spec ("image"/"height"/"label").
            for entry in iter(record):
                feature[entry.spec] = _bytes_feature(entry.data.tostring())
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_tfrecord_features(shape, image_string, height_string, mask_string):
    """Build a tf.train.Example describing one image/height/label triple.

    Keys must match the ones expected by the reading code.
    """
    feature_map = dict(
        rows=_int64_feature(shape[0]),
        cols=_int64_feature(shape[1]),
        image=_bytes_feature(image_string),
        height=_bytes_feature(height_string),
        label=_bytes_feature(mask_string),
    )
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def read_tfrecord_queue(tfrecord_queue):
    """Read one serialized example from *tfrecord_queue* and decode its images.

    Returns the list of decoded tensors produced by
    get_img_from_tf_features_list() for the specs declared in _feature_specs.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(tfrecord_queue)
    return get_img_from_tf_features_list(
        tf.io.parse_single_example(serialized_example, features=_feature_specs["features"]),
        _feature_specs["images"]
    )
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def print_tfrecord(param_specs, param_out = dict()):
    """Dump every example in a TFRecord file back to TIFF images on disk.

    Parameters
    ----------
    param_specs :
        Currently unused; kept for call-signature compatibility with the
        other tfrecord helpers.
    param_out : dict
        ``param_out["tfrecords"]`` is the TFRecord file to read.
        ``param_out["images"]`` (optional) is the output directory; defaults
        to the previously hard-coded "B:\\DLMulti\\images" so existing
        callers keep their behavior.
    """
    # Use dataset API to import data directly from TFRecord file.
    data_raw = tf.data.TFRecordDataset(param_out["tfrecords"])
    # Define the parse function to extract a single example as a dict.
    def _parse_image_function(example_proto):
        # Parse the input tf.Example proto using the module-level feature spec.
        return tf.io.parse_single_example(example_proto, _feature_specs["features"])
    data_parsed = data_raw.map(_parse_image_function)
    # Output directory is now configurable instead of hard-coded.
    path = pathlib.Path(param_out.get("images", "B:\\DLMulti\\images"))
    path.mkdir(parents=True, exist_ok=True)
    # One TIFF per spec ("image"/"height"/"label") per example.
    for count, features in enumerate(data_parsed):
        write_img_from_tf_features_list(
            features,
            _feature_specs["images"],
            path, count
        )
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def write_img_from_tf_features_list(features, features_list, path, count):
    """Decode each image spec in *features_list* and write it as a TIFF file.

    File names follow the pattern "<spec>_<count><ext>" inside *path*.
    """
    rows = tf.cast(features["rows"], tf.int32)
    cols = tf.cast(features["cols"], tf.int32)
    for spec_item in features_list:
        out_name = "{}_{}{}".format(spec_item["spec"], count, spec_item["ext"])
        image = get_img_from_tf_features(
            features[spec_item["spec"]], spec_item["channels"], spec_item["type"],
            rows, cols
        )
        tifffile.imwrite(path / out_name, image.numpy())
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_img_from_tf_features_list(features, features_list):
    """Decode every image spec in *features_list* from the parsed *features* dict."""
    rows = tf.cast(features["rows"], tf.int32)
    cols = tf.cast(features["cols"], tf.int32)
    images = []
    for spec_item in features_list:
        decoded = get_img_from_tf_features(
            features[spec_item["spec"]], spec_item["channels"], spec_item["type"],
            rows, cols
        )
        images.append(decoded)
    return images
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def get_img_from_tf_features(features, channels, dtype, rows, cols):
    """Reinterpret a raw byte feature as a (rows, cols, channels) tensor of *dtype*."""
    flat = tf.decode_raw(features, dtype)
    target_shape = tf.stack([rows, cols, channels])
    return tf.reshape(flat, target_shape)
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def read_tfrecord_attempt(tfrecord_queue):
    """Return image/annotation tensors that are created by reading tfrecord file.

    The function accepts a tfrecord filenames queue as input, usually created
    with tf.train.string_input_producer() where the filename is specified with
    the desired number of epochs. It takes the queue produced by the
    aforementioned tf.train.string_input_producer() and defines tensors
    converted from raw binary representations into reshaped image/annotation
    tensors. (Legacy TF1 queue-based API.)

    Parameters
    ----------
    tfrecord_queue : tfrecord filename queue
        String queue object from tf.train.string_input_producer()

    Returns
    -------
    image, annotation : tuple of tf.int32 (image, annotation)
        Tuple of image/annotation tensors
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(tfrecord_queue)
    # Schema here differs from _feature_specs: explicit height/width ints plus
    # raw image ("data_raw", float32, 4 channels) and mask ("mask_raw", uint8).
    features = tf.io.parse_single_example(
        serialized_example,
        features={
            'height': tf.io.FixedLenFeature([], tf.int64),
            'width': tf.io.FixedLenFeature([], tf.int64),
            'data_raw': tf.io.FixedLenFeature([], tf.string),
            'mask_raw': tf.io.FixedLenFeature([], tf.string)
        }
    )
    image = tf.decode_raw(features['data_raw'], tf.float32)
    annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    image_shape = tf.stack([height, width, 4])
    annotation_shape = tf.stack([height, width, 1])
    image = tf.reshape(image, image_shape)
    annotation = tf.reshape(annotation, annotation_shape)
annotation = tf.reshape(annotation, annotation_shape)
return image, annotation | 2,016 | 0 | 110 |
fe6bb1216cba74208ecaf186e28830bf6199ceab | 298 | py | Python | TAO/Firewall/EXPLOITS/ELCO/fosho/requests/packages/oreos/core.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 46 | 2017-05-15T11:15:08.000Z | 2018-07-02T03:32:52.000Z | TAO/Firewall/EXPLOITS/ELCO/fosho/requests/packages/oreos/core.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | null | null | null | TAO/Firewall/EXPLOITS/ELCO/fosho/requests/packages/oreos/core.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 24 | 2017-05-17T03:26:17.000Z | 2018-07-09T07:00:50.000Z | # -*- coding: utf-8 -*-
"""
oreos.core
~~~~~~~~~~
The creamy white center.
"""
from .monkeys import SimpleCookie
def dict_from_string(s):
    """Parse a cookie header string into a plain ``{name: value}`` dict."""
    cookies = dict()
    c = SimpleCookie()
    c.load(s)
    for k,v in c.items():
        # v is a Morsel; keep only its value, dropping attributes/flags.
        cookies.update({k: v.value})
return cookies | 11.92 | 36 | 0.550336 | # -*- coding: utf-8 -*-
"""
oreos.core
~~~~~~~~~~
The creamy white center.
"""
from .monkeys import SimpleCookie
def dict_from_string(s):
    """Parse a cookie header string into a plain ``{name: value}`` dict."""
    cookies = dict()
    c = SimpleCookie()
    c.load(s)
    for k,v in c.items():
        # v is a Morsel; keep only its value, dropping attributes/flags.
        cookies.update({k: v.value})
return cookies | 0 | 0 | 0 |
73087379e75dba0f7af39e96d3b3fb511649fe52 | 1,189 | py | Python | tests/spec/Spec/get_op_for_request_test.py | terencehonles/bravado-core | 382db874b7b838dcfd169b0ce490d6a447ad6ff2 | [
"BSD-3-Clause"
] | 122 | 2015-04-22T17:31:18.000Z | 2021-11-08T10:29:57.000Z | tests/spec/Spec/get_op_for_request_test.py | terencehonles/bravado-core | 382db874b7b838dcfd169b0ce490d6a447ad6ff2 | [
"BSD-3-Clause"
] | 364 | 2015-04-10T22:19:23.000Z | 2022-02-25T08:55:10.000Z | tests/spec/Spec/get_op_for_request_test.py | terencehonles/bravado-core | 382db874b7b838dcfd169b0ce490d6a447ad6ff2 | [
"BSD-3-Clause"
] | 118 | 2015-04-20T15:11:53.000Z | 2021-12-09T10:03:34.000Z | # -*- coding: utf-8 -*-
from bravado_core.spec import Spec
| 34.970588 | 73 | 0.748528 | # -*- coding: utf-8 -*-
from bravado_core.spec import Spec
def test_found_with_no_basepath(petstore_dict):
    """Without a basePath, an operation is matched on its bare path."""
    del petstore_dict['basePath']
    petstore_spec = Spec.from_dict(petstore_dict)
    op = petstore_spec.get_op_for_request('GET', '/pet/{petId}')
    assert op == petstore_spec.resources['pet'].operations['getPetById']
def test_not_found_with_no_basepath(petstore_dict):
    """Without a basePath, an unknown path yields None."""
    del petstore_dict['basePath']
    petstore_spec = Spec.from_dict(petstore_dict)
    op = petstore_spec.get_op_for_request('GET', '/foo/{fooId}')
    assert op is None
def test_found_with_basepath(petstore_spec, getPetByIdPetstoreOperation):
    """With a basePath, the request path must include the prefix to match."""
    op = petstore_spec.get_op_for_request('GET', '/v2/pet/{petId}')
    assert op == getPetByIdPetstoreOperation
def test_found_with_basepath_containing_trailing_slash(petstore_dict):
    """A trailing slash in basePath must not prevent matching."""
    petstore_dict['basePath'] = '/v2/'
    petstore_spec = Spec.from_dict(petstore_dict)
    op = petstore_spec.get_op_for_request('GET', '/v2/pet/{petId}')
    assert op == petstore_spec.resources['pet'].operations['getPetById']
def test_not_found_with_basepath(petstore_spec):
    """With a basePath, an unknown path yields None."""
    op = petstore_spec.get_op_for_request('GET', '/v2/foo/{fooId}')
assert op is None
| 1,010 | 0 | 115 |
c2c13dd46a699d262d613a09feddc7cbd6846840 | 26,334 | py | Python | btk20_src/lib/mkBeamforming.py | musiclvme/distant_speech_recognition | 60f867383488ac45c2fa3a5433736fdf00dd4f1d | [
"MIT"
] | 136 | 2018-12-06T06:35:44.000Z | 2022-03-27T15:07:42.000Z | btk20_src/lib/mkBeamforming.py | musiclvme/distant_speech_recognition | 60f867383488ac45c2fa3a5433736fdf00dd4f1d | [
"MIT"
] | 25 | 2018-12-03T04:33:24.000Z | 2021-07-28T22:01:37.000Z | btk20_src/lib/mkBeamforming.py | musiclvme/distant_speech_recognition | 60f867383488ac45c2fa3a5433736fdf00dd4f1d | [
"MIT"
] | 68 | 2019-01-08T06:33:30.000Z | 2021-11-17T09:33:10.000Z | import sys
import string
import numpy
from numpy import *
import os.path
import pickle
import re
from types import FloatType
import getopt, sys
import copy
import gzip
from btk.common import *
from btk.stream import *
from btk.feature import *
from btk.matrix import *
from btk.utils import *
#from pygsl import *
from pygsl import multiminimize
from pygsl import sf
import pygsl.errors as errors
from btk import dbase
from btk.modulated import *
from btk.subbandBeamforming import *
from btk.beamformer import *
APPZERO = 1.0E-20
# @memo fun_MK() and dfun_MK() are call back functions for pygsl.
# You can easily implement a new MK beamformer by writing a new class derived from
# a class 'MKSubbandBeamformer' which have methods, normalizeWa( wa ),
# calcKurtosis( srcX, fbinX, wa ) and gradient( srcX, fbinX, wa ).
# @class maximum empirical kurtosis beamformer
# usage:
# 1. construct an object, mkBf = MKSubbandBeamformerGGDr( spectralSources )
# 2. calculate the fixed weights, mkBf.calcFixedWeights( sampleRate, delay )
# 3. accumulate input vectors, mkBf.accumObservations( sFrame, eFrame, R )
# 4. calculate the covariance matricies of the inputs, mkBf.calcCov()
# 5. estimate active weight vectors, mkBf.estimateActiveWeights( fbinX, startpoint )
# @class maximum empirical kurtosis beamformer.
# The entire weight is normalized at each step in the steepest gradient algorithm.
# usage:
# 1. construct an object, mkBf = MEKSubbandBeamformer_nrm( spectralSources )
# 2. calculate the fixed weights, mkBf.calcFixedWeights( sampleRate, delay )
# 3. accumulate input vectors, mkBf.accumObservations( sFrame, eFrame, R )
# 4. calculate the covariance matricies of the inputs, mkBf.calcCov()
# 5. estimate active weight vectors, mkBf.estimateActiveWeights( fbinX, startpoint )
| 39.9 | 156 | 0.57853 | import sys
import string
import numpy
from numpy import *
import os.path
import pickle
import re
from types import FloatType
import getopt, sys
import copy
import gzip
from btk.common import *
from btk.stream import *
from btk.feature import *
from btk.matrix import *
from btk.utils import *
#from pygsl import *
from pygsl import multiminimize
from pygsl import sf
import pygsl.errors as errors
from btk import dbase
from btk.modulated import *
from btk.subbandBeamforming import *
from btk.beamformer import *
APPZERO = 1.0E-20
class MKSubbandBeamformer:
    """Base class for maximum-kurtosis (MK) subband GSC beamformers.

    Holds the state shared by the concrete MK/MEK implementations:
    quiescent weight vectors (_wq), blocking matrices (_B), the entire
    GSC weights (_wo = wq - B * wa), and accumulated subband snapshots
    (_observations).  Python 2 code (print statements).
    """
    def __init__(self, spectralSources, NC, alpha, halfBandShift ):
        # the number of sound sources
        self._nSource = 1
        self._logfp = 0
        if NC > 2:
            print 'not yet implemented in the case of NC > 2'
            sys.exit()
        if halfBandShift==True:
            print "not support halfBandShift==True yet"
            sys.exit(1)
        self._halfBandShift = halfBandShift
        self._NC = NC
        # ouptputs of analysis filter banks
        self._spectralSources = spectralSources
        # the number of channels
        self._nChan = len(spectralSources)
        # fft length = the number of subbands
        self._fftLen = spectralSources[0].fftLen()
        # regularization term
        self._alpha = alpha
        # input vectors [frameN][chanN]
        self._observations = []
        # covariance matrix of input vectors [fftLen/2+1][chanN][chanN]
        self._SigmaX = []
        # quiescent vectors : _wq[nSource][fftLen2+1]
        self._wq = []
        # blocking matricies : _B[nSource][fftLen2+1]
        self._B = []
        # the entire GSC 's weight, wq - B * wa : _wo[nSource][fftLen2+1]
        self._wo = []
        for srcX in range(self._nSource):
            self._wo.append( numpy.zeros( (self._fftLen/2+1,self._nChan), numpy.complex) )
    def nextSpkr(self):
        """Discard all per-speaker state before processing the next speaker."""
        del self._observations
        del self._SigmaX
        del self._wq
        del self._B
        del self._wo
        self._observations = []
        self._SigmaX = []
        self._wq = []
        self._B = []
        self._wo = []
        for srcX in range(self._nSource):
            self._wo.append( numpy.zeros( (self._fftLen/2+1,self._nChan), numpy.complex) )
        if self._logfp != 0:
            self._logfp.flush()
    def openLogFile(self, logfilename, fbinXD = {50:True,100:True} ):
        """Open a gzip'ed optimization log; fbinXD selects which bins get logged."""
        # NOTE(review): mutable default argument is shared across calls; harmless
        # here only because it is never mutated.
        self._logfp = gzip.open(logfilename, 'w',1)
        self._fbinXD4log = fbinXD
    def closeLogFile(self):
        """Close the optimization log if one was opened."""
        if self._logfp != 0:
            self._logfp.close()
    def writeLogFile(self,msg):
        """Append msg to the optimization log if one was opened."""
        if self._logfp != 0:
            self._logfp.write(msg)
    def accumObservations(self, sFrame, eFrame, R=1 ):
        """@brief accumulate observed subband components for adaptation """
        """@param sFrame: the start frame"""
        """@param eFrame: the end frame"""
        """@param R : R = 2**r, where r is a decimation factor"""
        """@return self._observations[frame][fftLen][chanN] : input subband snapshots"""
        fftLen = self._fftLen
        chanN = self._nChan
        if R < 1:
            R = 1
        self._observations = []
        # zero mean at this time... , mean = numpy.zeros(chanN).astype(numpy.complex)
        snapShotArray = SnapShotArrayPtr( fftLen, chanN )
        #print 'from %d to %d, fftLen %d' %( sFrame, eFrame, snapShotArray.fftLen() )
        #for sX in range(sFrame,eFrame):
        counter = 0
        try:
            for sX in range(eFrame):
                ichan = 0
                for analFB in self._spectralSources:
                    sbSample = numpy.array(analFB.next())
                    snapShotArray.newSample( sbSample, ichan )
                    ichan += 1
                snapShotArray.update()
                if sX >= sFrame and sX < eFrame :
                    X_t = [] # X_t[fftLen][chanN]
                    if sX % R == 0:
                        for fbinX in range(fftLen):
                            X_t.append( numpy.array( snapShotArray.getSnapShot(fbinX) ) )
                            # X_t.append( copy.deepcopy( snapShotArray.getSnapShot(fbinX) ) )
                        self._observations.append( X_t )
                        #print X_t
                counter = sX
            for analFB in self._spectralSources:
                analFB.reset()
        except :
            # NOTE(review): a bare except treats ANY error as end-of-stream;
            # real failures in the filter banks are silently swallowed here.
            print 'reach the end %d' %counter
            return self._observations
        #del snapShotArray
        return self._observations
    def calcCov(self):
        """@brief calculate covariance matricies of inputs over all frequency bins"""
        """@return the covariance matricies of input vectors : SigmaX[fftLen][chanN][chanN]"""
        if len(self._observations) == 0:
            print "Zero observation! Call getObservations() first!"
            sys.exit()
        samples = self._observations
        frameN = len( samples )
        fftLen = self._fftLen
        fftLen2 = fftLen/2
        chanN = self._nChan
        SigmaX = numpy.zeros( (fftLen2+1,chanN,chanN), numpy.complex )
        # zero mean at this time... , mean = numpy.zeros(chanN).astype(numpy.complex)
        for sX in range(frameN):
            for fbinX in range(fftLen2+1):
                # zero mean assumption
                SigmaX[fbinX] += numpy.outer( samples[sX][fbinX], conjugate(samples[sX][fbinX]) )
        for fbinX in range(fftLen2+1):
            SigmaX[fbinX] /= frameN
        self._SigmaX = SigmaX
        return self._SigmaX
    def calcGSCOutput_f(self, wo, Xft ):
        """@breif calculate outputs of the GSC at a subband frequency bin"""
        """@param wo[nChan] : the entire beamformer's weight"""
        """@param Xft[nChan] : the input vector"""
        """@return an output value of a GSC beamformer at a subband frequency bin"""
        """@note this function supports half band shift only"""
        # Y(t) = wo^H X(t)
        wH = numpy.transpose( numpy.conjugate( wo ) )
        Yt = numpy.dot( wH, Xft )
        return Yt
    def getSourceN(self):
        return self._nSource
    def getChanN(self):
        return self._nChan
    def getSampleN(self):
        return len( self._observations )
    def getFftLen(self):
        return self._fftLen
    def getWq(self, srcX, fbinX):
        return self._wq[srcX][fbinX]
    def getB(self, srcX, fbinX):
        return self._B[srcX][fbinX]
    def getAlpha(self):
        return self._alpha
    def setFixedWeights(self, wq, updateBlockingMatrix=False, norm=1 ):
        # @brief set the given quiescent vectors.
        #        If the second argument is True, blocking matricies are re-calculated.
        # @param wq : wq[srcX][fbinX][chanX]
        # @param updateBlockingMatrix : True or False
        fftLen2 = self._fftLen / 2
        self._wq = []
        if updateBlockingMatrix==True:
            self._B = []
        if self._NC == 1:
            for srcX in range(self._nSource):
                wq_n = []
                if updateBlockingMatrix==True:
                    B_n = []
                for fbinX in range(fftLen2+1):
                    wq_nf = numpy.zeros( self._nChan, numpy.complex )
                    for chanX in range(self._nChan):
                        wq_nf[chanX] = wq[srcX][fbinX][chanX] / norm
                    wq_n.append(wq_nf)
                    if updateBlockingMatrix==True:
                        B_nf = calcBlockingMatrix(wq_nf)
                        B_n.append(B_nf)
                self._wq.append(wq_n)
                if updateBlockingMatrix==True:
                    self._B.append(B_n)
        else:
            print 'not yet implemented in the case of NC > 2'
            sys.exit()
    def calcFixedWeights(self, sampleRate, delays ):
        # @brief calculate the quiescent vectors and blocking matricies
        # @param sampleRate : sampling rate (Hz)
        # @param delays[nSource][nChan] :
        fftLen2 = self._fftLen / 2
        self._wq = []
        self._B = []
        if self._NC == 1:
            # single constraint: delay-and-sum quiescent vector per bin
            for srcX in range(self._nSource):
                wq_n = []
                B_n = []
                for fbinX in range(fftLen2+1):
                    wq_nf = calcArrayManifold_f( fbinX, self._fftLen, self._nChan, sampleRate, delays[0], self._halfBandShift )
                    B_nf = calcBlockingMatrix(wq_nf)
                    wq_n.append(wq_nf)
                    B_n.append(B_nf)
                self._wq.append(wq_n)
                self._B.append(B_n)
        elif self._NC == 2:
            # two constraints: each source gets a null steered at the other
            wq1 = []
            wq2 = []
            B1 = []
            B2 = []
            for fbinX in range(fftLen2+1):
                wds1 = calcArrayManifoldWoNorm_f( fbinX, self._fftLen, self._nChan, sampleRate, delays[0], self._halfBandShift)
                wds2 = calcArrayManifoldWoNorm_f( fbinX, self._fftLen, self._nChan, sampleRate, delays[1], self._halfBandShift)
                wq1_nf = calcNullBeamformer( wds1, wds2, self._NC )
                wq2_nf = calcNullBeamformer( wds2, wds1, self._NC )
                B1_nf = calcBlockingMatrix( wq1_nf, self._NC )
                B2_nf = calcBlockingMatrix( wq2_nf, self._NC )
                wq1.append(wq1_nf)
                wq2.append(wq2_nf)
                B1.append(B1_nf)
                B2.append(B2_nf)
            self._wq.append(wq1)
            self._wq.append(wq2)
            self._B.append(B1)
            self._B.append(B2)
        else:
            print 'not yet implemented in the case of NC > 2'
            sys.exit()
    def UnpackWeights( self, waAs ):
        """@brief Unpack the active weight vector at a frequency bin"""
        # waAs stores interleaved (real, imag) pairs per source.
        nSource = self._nSource
        chanN = self._nChan
        NC = self._NC
        weights = []
        idx = 0
        for srcX in range(nSource):
            waA = numpy.zeros(chanN-NC, numpy.complex)
            for chanX in range(chanN-NC):
                waA[chanX] = waAs[2 * chanX + idx ] + 1j * waAs[2 * chanX + 1 + idx]
            weights.append( waA )
            #print '|wa|', numpy.sqrt( dot(waA, conjugate(waA)) )
            idx += ( 2 * (chanN - NC) )
        return weights
# @memo fun_MK() and dfun_MK() are call back functions for pygsl.
# You can easily implement a new MK beamformer by writing a new class derived from
# a class 'MKSubbandBeamformer' which have methods, normalizeWa( wa ),
# calcKurtosis( srcX, fbinX, wa ) and gradient( srcX, fbinX, wa ).
def fun_MK(x, (fbinX, MKSubbandBeamformerPtr, NC) ):
    # @brief calculate the objective function for the gradient algorithm
    # @param x[2(chanN-NC)] : active weights (packed)
    # @param fbinX: the frequency bin index you process
    # @param MNSubbandBeamformerPtr: the class object for calculating functions
    # @param NC: the number of constrants (not yet implemented)
    # NOTE: pygsl callback; uses Python-2-only tuple-unpacking parameters.
    chanN = MKSubbandBeamformerPtr.getChanN()
    frameN = MKSubbandBeamformerPtr.getSampleN()
    fftLen = MKSubbandBeamformerPtr.getFftLen()
    sourceN = MKSubbandBeamformerPtr.getSourceN()
    # Unpack current weights : x[2*nSource*(chanN - NC )] -> wa[nSource][chanN-NC]
    wa = []
    idx = 0
    for srcX in range(sourceN):
        wa.append( numpy.zeros( chanN-NC, numpy.complex) )
        for chanX in range(chanN-NC):
            wa[srcX][chanX] = x[2 * chanX+ idx] + 1j * x[2 * chanX + 1+ idx]
        idx += ( 2 * (chanN - NC) )
    wa = MKSubbandBeamformerPtr.normalizeWa( fbinX, wa )
    # Calculate the objective function, the negative of the kurtosis
    nkurt = 0.0
    for srcX in range(sourceN):
        nkurt -= MKSubbandBeamformerPtr.calcKurtosis( srcX, fbinX, wa )
    # a regularization term
    rterm = 0.0
    alpha = MKSubbandBeamformerPtr.getAlpha()
    for srcX in range(sourceN):
        rterm += alpha * numpy.inner(wa, conjugate(wa))
    nkurt += rterm.real
    del wa
    return nkurt
def dfun_MK(x, (fbinX, MKSubbandBeamformerPtr, NC ) ):
    # @brief calculate the derivatives of the objective function for the gradient algorithm
    # @param x[2(chanN-NC)] : active weights (packed)
    # @param fbinX: the frequency bin index you process
    # @param MKSubbandBeamformerPtr: the class object for calculating functions
    # @param NC: the number of constrants
    # NOTE: pygsl gradient callback; must mirror fun_MK's packing convention.
    chanN = MKSubbandBeamformerPtr.getChanN()
    frameN = MKSubbandBeamformerPtr.getSampleN()
    fftLen = MKSubbandBeamformerPtr.getFftLen()
    sourceN = MKSubbandBeamformerPtr.getSourceN()
    # Unpack current weights : x[2*nSource*(chanN - NC )] -> wa[nSource][chanN-NC]
    wa = []
    idx = 0
    for srcX in range(sourceN):
        wa.append( numpy.zeros( chanN-NC, numpy.complex) )
        for chanX in range(chanN-NC):
            wa[srcX][chanX] = x[2 * chanX+ idx] + 1j * x[2 * chanX + 1+ idx]
        idx += ( 2 * (chanN - NC) )
    wa = MKSubbandBeamformerPtr.normalizeWa( fbinX, wa )
    # Calculate a gradient
    deltaWa = []
    for srcX in range(sourceN):
        deltaWa_n = - MKSubbandBeamformerPtr.gradient( srcX, fbinX, wa )
        deltaWa.append( deltaWa_n )
    # add the derivative of the regularization term
    alpha = MKSubbandBeamformerPtr.getAlpha()
    for srcX in range(sourceN):
        deltaWa[srcX] += alpha * wa[srcX]
    # Pack the gradient
    grad = numpy.zeros(2 * sourceN * (chanN - NC), numpy.float)
    idx = 0
    for srcX in range(sourceN):
        for chanX in range(chanN - NC):
            grad[2*chanX+ idx] = deltaWa[srcX][chanX].real
            grad[2*chanX + 1+ idx] = deltaWa[srcX][chanX].imag
        idx += ( 2 * (chanN - NC) )
    #if fbinX == 10:
    #    print 'grad', grad
    del wa
    return grad
def fdfun_MK(x, (fbinX, MKSubbandBeamformerPtr, NC ) ):
    # @brief pygsl callback returning both the objective value and its gradient.
    f = fun_MK(x, (fbinX, MKSubbandBeamformerPtr, NC ) )
    df = dfun_MK(x, (fbinX, MKSubbandBeamformerPtr, NC ) )
    return f, df
# @class maximum empirical kurtosis beamformer
# usage:
# 1. construct an object, mkBf = MKSubbandBeamformerGGDr( spectralSources )
# 2. calculate the fixed weights, mkBf.calcFixedWeights( sampleRate, delay )
# 3. accumulate input vectors, mkBf.accumObservations( sFrame, eFrame, R )
# 4. calculate the covariance matricies of the inputs, mkBf.calcCov()
# 5. estimate active weight vectors, mkBf.estimateActiveWeights( fbinX, startpoint )
class MEKSubbandBeamformer_pr(MKSubbandBeamformer):
    """Maximum empirical kurtosis beamformer (Polak-Ribiere conjugate gradient).

    Maximizes kurt(Y) = E[|Y|^4] - beta * E[|Y|^2]^2 of the GSC output per
    subband, keeping running statistics across utterances.
    """
    def __init__(self, spectralSources, NC=1, alpha = 1.0E-02, beta = 3.0, halfBandShift=False ):
        MKSubbandBeamformer.__init__(self, spectralSources, NC, alpha, halfBandShift )
        self._beta = beta
        self.resetStatistics()
    def resetStatistics(self):
        """Zero the running averages of |Y|^4, |Y|^2 and the frame counts."""
        self._prevAvgY4 = numpy.zeros( (self._nSource,self._fftLen/2+1), numpy.float )
        self._prevAvgY2 = numpy.zeros( (self._nSource,self._fftLen/2+1), numpy.float )
        self._prevFrameN = numpy.zeros( (self._nSource,self._fftLen/2+1), numpy.int )
    def storeStatistics(self, srcX, fbinX, wa_f):
        """Fold the current observations into the running |Y|^2 / |Y|^4 averages."""
        frameN = len( self._observations )
        self._prevFrameN[srcX][fbinX] += frameN
        for frX in range(frameN):
            self.calcEntireWeights_f( fbinX, wa_f )
            Y = self.calcGSCOutput_f( self._wo[srcX][fbinX], self._observations[frX][fbinX] )
            Y2 = Y * numpy.conjugate( Y )
            Y4 = Y2 * Y2
            self._prevAvgY2[srcX][fbinX] += ( Y2.real / self._prevFrameN[srcX][fbinX] )
            self._prevAvgY4[srcX][fbinX] += ( Y4.real / self._prevFrameN[srcX][fbinX] )
        #print 'Store %d : %e %e %d' %(fbinX,self._prevAvgY4[srcX][fbinX],self._prevAvgY2[srcX][fbinX],self._prevFrameN[srcX][fbinX])
    def normalizeWa(self, fbinX, wa):
        # No normalization in this variant (see MEKSubbandBeamformer_nrm).
        return wa
    def calcEntireWeights_f(self, fbinX, wa_f ):
        """@breif calculate the entire weight vector of the beamformer for each bin"""
        """@param fbinX : the index of the subband frequency bin"""
        """@param wa_f[nSource][nChan-NC] """
        # wo = wq - B * wa  (standard GSC combination)
        for srcX in range(self._nSource):
            self._wo[srcX][fbinX] = self._wq[srcX][fbinX] - numpy.dot( self._B[srcX][fbinX], wa_f[srcX] )
        return self._wo
    def calcKurtosis( self, srcX, fbinX, wa_f ):
        # @brief calculate empirical kurtosis :
        #        \frac{1}{T} \sum_{t=0}^{T-1} Y^4 - 3 ( \frac{1}{T} \sum_{t=0}^{T-1} Y^2 )
        # @param srcX: the source index you process
        # @param fbinX : the index of the subband frequency bin"""
        # @param wa_f[nSource][nChan-NC]
        frameN = len( self._observations )
        totalFrameN = self._prevFrameN[srcX][fbinX] + frameN
        # re-weight the stored averages by their share of the total frames
        exY4 = ( self._prevAvgY4[srcX][fbinX] / totalFrameN ) * self._prevFrameN[srcX][fbinX]
        exY2 = ( self._prevAvgY2[srcX][fbinX] / totalFrameN ) * self._prevFrameN[srcX][fbinX]
        for frX in range(frameN):
            self.calcEntireWeights_f( fbinX, wa_f )
            Y = self.calcGSCOutput_f( self._wo[srcX][fbinX], self._observations[frX][fbinX] )
            Y2 = Y * numpy.conjugate( Y )
            Y4 = Y2 * Y2
            exY2 += ( Y2.real / totalFrameN )
            exY4 += ( Y4.real / totalFrameN )
        kurt = exY4 - self._beta * exY2 * exY2
        return kurt
    def gradient( self, srcX, fbinX, wa_f ):
        # @brief calculate the derivative of empirical kurtosis w.r.t. wa_H
        # @param srcX: the source index you process
        # @param fbinX : the index of the subband frequency bin"""
        # @param wa_f[nSource][nChan-NC]
        frameN = len( self._observations )
        totalFrameN = self._prevFrameN[srcX][fbinX] + frameN
        exY2 = ( self._prevAvgY2[srcX][fbinX] / totalFrameN ) * self._prevFrameN[srcX][fbinX]
        dexY2 = numpy.zeros( ( self._nChan - self._NC ), numpy.complex )
        dexY4 = numpy.zeros( ( self._nChan - self._NC ), numpy.complex )
        BH = numpy.transpose( numpy.conjugate( self._B[srcX][fbinX] ) )
        for frX in range(frameN):
            self.calcEntireWeights_f( fbinX, wa_f )
            Y = self.calcGSCOutput_f( self._wo[srcX][fbinX], self._observations[frX][fbinX] )
            BHX = - numpy.dot( BH, self._observations[frX][fbinX] ) # BH * X
            Y2 = Y * numpy.conjugate( Y )
            dexY4 += ( 2 * Y2 * BHX * numpy.conjugate( Y ) / totalFrameN )
            dexY2 += ( BHX * numpy.conjugate( Y ) / totalFrameN )
            exY2 += ( Y2.real / totalFrameN )
        deltaKurt = dexY4 - 2 * self._beta * exY2 * dexY2
        del dexY2
        del dexY4
        return deltaKurt
    def estimateActiveWeights( self, fbinX, startpoint, MAXITNS=40, TOLERANCE=1.0E-03, STOPTOLERANCE = 1.0E-02, DIFFSTOPTOLERANCE= 1.0E-05, STEPSIZE=0.01 ):
        # @brief estimate active weight vectors at a frequency bin
        # @param fbinX: the frequency bin index you process
        # @param startpoint: the initial active weight vector
        # @param NC: the number of constrants (not yet implemented)
        # @param MAXITNS: the maximum interation for the gradient algorithm
        # @param TOLERANCE : tolerance for the linear search
        # @param STOPTOLERANCE : tolerance for the gradient algorithm
        if fbinX > self._fftLen/2 :
            print "fbinX %d > fftLen/2 %d?" %(fbinX,self._fftLen/2)
        ndim = 2 * self._nSource * ( self._nChan - self._NC )
        # initialize gsl functions
        # NOTE(review): the local name 'sys' shadows the sys module inside
        # this method.
        sys = multiminimize.gsl_multimin_function_fdf( fun_MK, dfun_MK, fdfun_MK, [fbinX, self, self._NC], ndim )
        solver = multiminimize.conjugate_pr( sys, ndim )
        solver.set(startpoint, STEPSIZE, TOLERANCE )
        waAs = startpoint
        #print "Using solver ", solver.name()
        mi = 10000.0
        preMi = 10000.0
        for itera in range(MAXITNS):
            try:
                status1 = solver.iterate()
            except errors.gsl_NoProgressError, msg:
                print "No progress error %f" %mi
                print msg
                break
            except:
                print "Unexpected error:"
                raise
            gradient = solver.gradient()
            waAs = solver.getx()
            mi = solver.getf()
            status2 = multiminimize.test_gradient( gradient, STOPTOLERANCE )
            if fbinX % 10 == 0:
                print 'EK %d %d %e' %(fbinX, itera, mi)
            if status2==0 :
                print 'EK Converged %d %d %e' %(fbinX, itera,mi)
                break
            diff = abs( preMi - mi )
            if diff < DIFFSTOPTOLERANCE:
                print 'EK Converged %d %d %e (%e)' %(fbinX, itera,mi, diff)
                break
            preMi = mi
        #print '=== %d' %(fbinX)
        return waAs
# @class maximum empirical kurtosis beamformer.
# The entire weight is normalized at each step in the steepest gradient algorithm.
# usage:
# 1. construct an object, mkBf = MEKSubbandBeamformer_nrm( spectralSources )
# 2. calculate the fixed weights, mkBf.calcFixedWeights( sampleRate, delay )
# 3. accumulate input vectors, mkBf.accumObservations( sFrame, eFrame, R )
# 4. calculate the covariance matricies of the inputs, mkBf.calcCov()
# 5. estimate active weight vectors, mkBf.estimateActiveWeights( fbinX, startpoint )
class MEKSubbandBeamformer_nrm(MEKSubbandBeamformer_pr):
    """Maximum empirical kurtosis beamformer with weight normalization.

    Same objective as MEKSubbandBeamformer_pr, but the active weight vector
    is rescaled at every steepest-descent step so its norm never exceeds
    gamma (or ||wq|| when gamma < 0).
    """
    def __init__(self, spectralSources, NC=1, alpha = 0.1, beta=3.0, gamma=-1.0, halfBandShift=False ):
        MEKSubbandBeamformer_pr.__init__(self, spectralSources, NC, alpha, beta, halfBandShift )
        self._gamma = gamma
    def normalizeWeight( self, srcX, fbinX, wa ):
        """Clip ||wa|| to gamma (or to ||wq|| for this bin when gamma < 0)."""
        nrm_wa2 = numpy.inner(wa, conjugate(wa))
        nrm_wa = sqrt( nrm_wa2.real )
        if self._gamma < 0:
            gamma = sqrt( numpy.inner(self._wq[srcX][fbinX],conjugate(self._wq[srcX][fbinX])) )
        else:
            gamma = self._gamma
        if nrm_wa > abs(gamma) : # >= 1.0:
            wa = abs(gamma) * wa / nrm_wa
        return wa
    def normalizeWa(self, fbinX, wa_f):
        """Normalize the active weight vector of every source at this bin."""
        wa = []
        for srcX in range(self._nSource):
            wa.append( self.normalizeWeight( srcX, fbinX, wa_f[srcX] ) )
        return wa
    def calcEntireWeights_f(self, fbinX, wa_f ):
        """@breif calculate and normalize the entire weight vector of the beamformer for each bin"""
        """@param fbinX : the index of the subband frequency bin"""
        """@param wa_f[nSource][nChan-NC] """
        for srcX in range(self._nSource):
            wa = self.normalizeWeight( srcX, fbinX, wa_f[srcX] )
            self._wo[srcX][fbinX] = self._wq[srcX][fbinX] - numpy.dot( self._B[srcX][fbinX], wa )
        return self._wo
    def estimateActiveWeights( self, fbinX, startpoint, MAXITNS=40, TOLERANCE=1.0E-03, STOPTOLERANCE = 1.0E-02, DIFFSTOPTOLERANCE= 1.0E-10, STEPSIZE=0.01 ):
        # @brief estimate active weight vectors at a frequency bin
        # @param fbinX: the frequency bin index you process
        # @param startpoint: the initial active weight vector
        # @param NC: the number of constrants (not yet implemented)
        # @param MAXITNS: the maximum interation for the gradient algorithm
        # @param TOLERANCE : tolerance for the linear search
        # @param STOPTOLERANCE : tolerance for the gradient algorithm
        if fbinX > self._fftLen/2 :
            print "fbinX %d > fftLen/2 %d?" %(fbinX,self._fftLen/2)
        ndim = 2 * self._nSource * ( self._nChan - self._NC )
        # initialize gsl functions
        # NOTE(review): the local name 'sys' shadows the sys module inside
        # this method.
        sys = multiminimize.gsl_multimin_function_fdf( fun_MK, dfun_MK, fdfun_MK, [fbinX, self, self._NC], ndim )
        solver = multiminimize.steepest_descent( sys, ndim )
        solver.set(startpoint, STEPSIZE, TOLERANCE )
        waAs = startpoint
        #print "Using solver ", solver.name()
        MINITERA = 2
        mi = 10000.0
        preMi = 10000.0
        for itera in range(MAXITNS):
            try:
                status1 = solver.iterate()
            except errors.gsl_NoProgressError, msg:
                print "solver.iterate(): No progress error %d" %(fbinX)
                print msg,mi
                break
            except:
                print "solver.iterate(): Unexpected error:"
                break
            status2 = 0
            try:
                gradient = solver.gradient()
                status2 = multiminimize.test_gradient( gradient, STOPTOLERANCE )
            except errors.gsl_NoProgressError, msg:
                print "multiminimize.test_gradient: No progress error %d" %(fbinX)
                print msg,mi
                break
            except:
                print "multiminimize.test_gradient: Unexpected error:"
                break
            waAs = solver.getx()
            mi = solver.getf()
            if self._logfp != 0:
                if self._fbinXD4log.has_key(fbinX)==True:
                    msg = '%d: %d %e\n' %(fbinX, itera, mi)
                    self._logfp.write( msg )
            if status2==0 and itera > MINITERA :
                print 'Converged1 %d %d %e' %(fbinX, itera,mi)
                if self._fbinXD4log.has_key(fbinX)==True:
                    msg = 'Converged1 %d %d %e\n' %(fbinX, itera,mi)
                    self._logfp.write( msg )
                break
            diff = abs( preMi - mi )
            if diff < DIFFSTOPTOLERANCE and itera > MINITERA:
                print 'Converged2 %d %d %e (%e)' %(fbinX, itera,mi, diff)
                if self._fbinXD4log.has_key(fbinX)==True:
                    msg = 'Converged2 %d %d %e (%e)\n' %(fbinX, itera,mi, diff)
                    self._logfp.write( msg )
                break
            preMi = mi
        #print '=== %d' %(fbinX)
        # Unpack current weights and normalize them
        wa = numpy.zeros( self._nChan - self._NC, numpy.complex)
        for chanX in range( self._nChan - self._NC ):
            wa[chanX] = waAs[2 * chanX] + 1j * waAs[2 * chanX + 1]
        wa = self.normalizeWeight( 0, fbinX, wa )
        self.storeStatistics( 0, fbinX, [wa] )
        for chanX in range( self._nChan - self._NC ):
            waAs[2*chanX] = wa[chanX].real
            waAs[2*chanX + 1] = wa[chanX].imag
        del wa
        #print waAs
        return waAs
| 18,654 | 5,728 | 135 |
729aa5c6d73fb9c3f5750c80708fb9fd5acd69bc | 312 | py | Python | esuits/answer_history/forms.py | junkhp/esuits_junki | 88293381d80184130adf5f6f96c47b9c79c294f2 | [
"MIT"
] | 2 | 2021-01-24T14:27:36.000Z | 2021-01-24T16:15:43.000Z | esuits/answer_history/forms.py | junkhp/esuits_junki | 88293381d80184130adf5f6f96c47b9c79c294f2 | [
"MIT"
] | 9 | 2021-02-01T03:20:59.000Z | 2021-03-06T08:15:04.000Z | esuits/answer_history/forms.py | junkhp/esuiets_junki | 88293381d80184130adf5f6f96c47b9c79c294f2 | [
"MIT"
] | 1 | 2021-02-07T03:41:01.000Z | 2021-02-07T03:41:01.000Z | # -*- coding: utf-8 -*-
from django import forms
| 22.285714 | 46 | 0.564103 | # -*- coding: utf-8 -*-
from django import forms
class AnswerHistoryCheckForm(forms.Form):
    """Form exposing one required radio-select field for picking an answer-history entry."""
    select = forms.ChoiceField(
        widget=forms.RadioSelect(attrs={
            'id': 'hisradio',
            'class': 'ans-history-radio-input',
        }),
        required=True,
        disabled=False,
    )
| 0 | 239 | 23 |
804b636892a7fa4562d5a1284dc781a81b3adfd3 | 1,433 | py | Python | splitter_bulk.py | rainyleaf/Lexical-Diversity | 8b01e9ab2661e0485e9079a7927f31701065c001 | [
"MIT"
] | null | null | null | splitter_bulk.py | rainyleaf/Lexical-Diversity | 8b01e9ab2661e0485e9079a7927f31701065c001 | [
"MIT"
] | null | null | null | splitter_bulk.py | rainyleaf/Lexical-Diversity | 8b01e9ab2661e0485e9079a7927f31701065c001 | [
"MIT"
] | null | null | null | import os
# File-name endings that mark a text file as ready to be split into words.
target_names = ['-to-process.txt.subbed', '_to_process.txt.subbed', '_to-process.txt.subbed', '-to_process.txt.subbed', '-tp.txt.subbed', '_tp.txt.subbed']
# Only directories whose name contains this marker (and not 'tagged') are processed.
target = "-Processing"
for dirname, dirs, files in os.walk('.'):
    if target in dirname and 'tagged' not in dirname:
        for filename in files:
            if any(filename.endswith(ending) for ending in target_names):
                inputname = "/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Processing/" + dirname + "/" + filename
                inputfile = open(inputname, 'r')
                # Derive the output name: swap the recognized ending for
                # '_split.txt' and strip spaces/commas/exclamation marks.
                for ending in target_names:
                    if filename.endswith(ending):
                        new_filename = filename.replace(ending, '_split.txt')
                        new_filename = new_filename.replace(' ', '_')
                        new_filename = new_filename.replace(',', '')
                        new_filename = new_filename.replace('!', '')
                print dirname + new_filename
                new_file = open("/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Processing/" + dirname + "/" + new_filename, 'w')
                # One word per output line, trimming trailing hyphens/quotes/dots
                # and leading quotes.  Python 2 print syntax.
                for line in inputfile:
                    for word in line.split():
                        #word = word.lower()
                        word = word.rstrip('-\n\r\'.')
                        word = word.lstrip("\'")
                        print >>new_file, word
inputfile.close() | 49.413793 | 158 | 0.545708 | import os
# File-name endings that mark a text file as ready to be split into words.
target_names = ['-to-process.txt.subbed', '_to_process.txt.subbed', '_to-process.txt.subbed', '-to_process.txt.subbed', '-tp.txt.subbed', '_tp.txt.subbed']
# Only directories whose name contains this marker (and not 'tagged') are processed.
target = "-Processing"
for dirname, dirs, files in os.walk('.'):
    if target in dirname and 'tagged' not in dirname:
        for filename in files:
            if any(filename.endswith(ending) for ending in target_names):
                inputname = "/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Processing/" + dirname + "/" + filename
                inputfile = open(inputname, 'r')
                # Derive the output name: swap the recognized ending for
                # '_split.txt' and strip spaces/commas/exclamation marks.
                for ending in target_names:
                    if filename.endswith(ending):
                        new_filename = filename.replace(ending, '_split.txt')
                        new_filename = new_filename.replace(' ', '_')
                        new_filename = new_filename.replace(',', '')
                        new_filename = new_filename.replace('!', '')
                print dirname + new_filename
                new_file = open("/Users/Torri/Documents/Grad stuff/Thesis stuff/Data - Novels/Processing/" + dirname + "/" + new_filename, 'w')
                # One word per output line, trimming trailing hyphens/quotes/dots
                # and leading quotes.  Python 2 print syntax.
                for line in inputfile:
                    for word in line.split():
                        #word = word.lower()
                        word = word.rstrip('-\n\r\'.')
                        word = word.lstrip("\'")
                        print >>new_file, word
                inputfile.close()
ade430af24e23c85d8a37decc033be928a493686 | 4,600 | py | Python | src/xinput.py | ypar/treqtl | 7c8ab7310edd83bc7f7950b45d4338341da07ce2 | [
"MIT"
] | null | null | null | src/xinput.py | ypar/treqtl | 7c8ab7310edd83bc7f7950b45d4338341da07ce2 | [
"MIT"
] | null | null | null | src/xinput.py | ypar/treqtl | 7c8ab7310edd83bc7f7950b45d4338341da07ce2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
###
# YoSon
# @treqtl/xinput.py
# produce xtreqtl input files by matching rs numbers from trait and iv summary statistics
###
import pandas as pd
import numpy as np
import os, sys
from sys import argv
from os import walk
from treqtl_input import read_dir
if __name__ == '__main__':
    # if main, run test with input files
    # CLI usage: xinput.py <expression_work_dir> <expression_file> <gwas_work_dir>
    ewkdir = argv[1]
    efile = argv[2]
    gwkdir = argv[3]
    outdf = xinput(ewkdir, efile, gwkdir)
| 50 | 356 | 0.619565 | #!/usr/bin/env python3
###
# YoSon
# @treqtl/xinput.py
# produce xtreqtl input files by matching rs numbers from trait and iv summary statistics
###
import pandas as pd
import numpy as np
import os, sys
from sys import argv
from os import walk
from treqtl_input import read_dir
def xinput(ewkdir, efile, gwkdir):
    """Build xtreqtl input tables by merging trait (e) and IV/GWAS (g) summary
    statistics on RSNUM, harmonizing effect alleles, and writing one merged
    TSV per GWAS file.

    ewkdir  -- directory holding the trait summary-statistics file
    efile   -- path to the trait summary-statistics file
    gwkdir  -- directory scanned (via read_dir) for GWAS summary files
    Returns the merged DataFrame of the last processed GWAS file.
    """
    ewkdirs = ewkdir + '/'
    gwkdirs = gwkdir + '/'
    edf = pd.read_csv(efile, delim_whitespace=True, header=0, na_values='NA')
    efilename = efile.replace(ewkdirs, '')
    efilename = efilename.replace('.txt', '_w_')
    gfilelist = read_dir(gwkdir)
    for gfile in gfilelist:
        gdf = pd.read_csv(gfile, delim_whitespace=True, header=0, na_values='NA', names= ['RSNUM', 'SNPID', 'CHR', 'POS', 'A1', 'A2', 'INC_ALLELE', 'INC_AFRQ', 'BETA', 'SE', 'PVAL'], dtype = {'RSNUM': str, 'SNPID': str, 'CHR': str, 'POS': str, 'A1': str, 'A2': str, 'INC_ALLELE': str, 'INC_AFRQ': str, 'BETA': np.float32, 'SE': np.float32, 'PVAL': np.float32})
        # NOTE(review): Series.replace('.0', '') only replaces values that are
        # exactly '.0'; if the intent was to strip a '.0' suffix from floatified
        # chromosome/position strings, .str.replace would be needed -- confirm.
        gdf['CHR'] = gdf['CHR'].replace('.0', '')
        gdf['POS'] = gdf['POS'].replace('.0', '')
        gfilename = gfile.replace(gwkdirs, '')
        outfile = str(efilename) + str(gfilename)
        merged = edf.merge(gdf, suffixes=('_e', '_g'), how='inner', left_on='RSNUM', right_on='RSNUM', left_index=False, right_index=False)
        merged.columns = ['GENE', 'i', 'tier', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'CHR_g', 'POS_g', 'A1_g', 'A2_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']
        merged = merged[['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A1_g', 'A2_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']]
        merged['A1_g'] = merged['A1_g'].str.upper()
        merged['A2_g'] = merged['A2_g'].str.upper()
        merged['INC_ALLELE'] = merged['INC_ALLELE'].str.upper()
        # Case 1: alleles already aligned and the effect allele is A1.
        mdf = merged[(merged.A1_e == merged.A1_g) & (merged.A2_e == merged.A2_g) & (merged.A1_e == merged.INC_ALLELE)]
        mdf = mdf.reset_index(drop=True)
        # Case 2: A1/A2 swapped between files but effect allele matches A1_e;
        # columns are reordered so the g-side alleles line up with the e-side.
        mdf0 = merged[(merged.A1_e == merged.A2_g) & (merged.A2_e == merged.A1_g) & (merged.A1_e == merged.INC_ALLELE)]
        mdf0 = mdf0.reset_index(drop=True)
        mdf0 = mdf0[['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A2_g', 'A1_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']]
        mdf0.columns = ['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A1_g', 'A2_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']
        # some summary statistics have minor/major coding, etc. rather than alt/ref
        # match alleles and compare to inc_allele (effective allele reported)
        # if effective allele in a1 or a2, flip beta and reorder columns accordingly
        mdf1 = merged[(merged.A1_e == merged.A1_g) & (merged.A2_e == merged.A2_g) & (merged.A2_e == merged.INC_ALLELE)]
        mdf1 = mdf1.dropna(subset=['BETA_g'])
        mdf1 = mdf1.reset_index(drop=True)
        mdf1['BETA_g_adj'] = mdf1['BETA_g'] * -1
        mdf1 = mdf1[['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A1_g', 'A2_g', 'A1_g', 'INC_AFRQ_g', 'BETA_g_adj', 'SE_g', 'PVAL']]
        mdf1.columns = ['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A1_g', 'A2_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']
        # Case 4: swapped alleles AND effect allele on the other strand of the
        # e-file; beta is flipped here too.
        mdf2 = merged[(merged.A1_e == merged.A2_g) & (merged.A2_e == merged.A1_g) & (merged.A2_e == merged.INC_ALLELE)]
        mdf2 = mdf2.dropna(subset=['BETA_g'])
        mdf2 = mdf2.reset_index(drop=True)
        #mdf2 = mdf2[np.isfinite(mdf2['BETA_g'])]
        mdf2['BETA_g_adj'] = mdf2['BETA_g'] * -1
        mdf2 = mdf2[['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A2_g', 'A1_g', 'A2_g', 'INC_AFRQ_g', 'BETA_g_adj', 'SE_g', 'PVAL']]
        mdf2.columns = ['GENE', 'RSNUM', 'CHR', 'POS_HG19', 'A1_e', 'A2_e', 'INC_AFRQ_e', 'BETA_e', 'SE_e', 'LOGP', 'SNPID', 'A1_g', 'A2_g', 'INC_ALLELE', 'INC_AFRQ_g', 'BETA_g', 'SE_g', 'PVAL']
        # Stack the four harmonized cases and write one file per GWAS input.
        temp0 = mdf.append(mdf0, ignore_index=True)
        temp1 = temp0.append(mdf1, ignore_index=True)
        temp2 = temp1.append(mdf2, ignore_index=True)
        outdf = temp2.reset_index(drop=True)
        del temp0, temp1, temp2, mdf, mdf0, mdf1, mdf2
        outdf.to_csv(outfile, sep='\t', index=False, na_rep='NA')
    # NOTE(review): only the last loop iteration's DataFrame is returned.
    return(outdf)
if __name__ == '__main__':
    # if main, run test with input files
    # CLI usage: xinput.py <expression_work_dir> <expression_file> <gwas_work_dir>
    ewkdir = argv[1]
    efile = argv[2]
    gwkdir = argv[3]
    outdf = xinput(ewkdir, efile, gwkdir)
| 4,116 | 0 | 23 |
26538207d5f5a691a112b07bba26c641ec81cf61 | 265 | py | Python | aioli_openapi/__init__.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
] | null | null | null | aioli_openapi/__init__.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
] | null | null | null | aioli_openapi/__init__.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
] | null | null | null | from aioli import Package
from .controller import HttpController
from .service import OpenApiService
from .config import ConfigSchema
# Package descriptor consumed by the aioli framework: registers the HTTP
# controller, the OpenAPI service, and this package's configuration schema.
export = Package(
    controllers=[HttpController],
    services=[OpenApiService],
    config=ConfigSchema,
    auto_meta=True
)
| 20.384615 | 38 | 0.777358 | from aioli import Package
from .controller import HttpController
from .service import OpenApiService
from .config import ConfigSchema
# Package descriptor consumed by the aioli framework: registers the HTTP
# controller, the OpenAPI service, and this package's configuration schema.
export = Package(
    controllers=[HttpController],
    services=[OpenApiService],
    config=ConfigSchema,
    auto_meta=True
)
| 0 | 0 | 0 |
e4919b9cf916abaae343f9577cf24c5bd7884722 | 4,629 | py | Python | nlptasks/task_classification_cnn_roc_prf.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | 4 | 2021-06-16T02:26:18.000Z | 2021-09-24T11:06:51.000Z | nlptasks/task_classification_cnn_roc_prf.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | null | null | null | nlptasks/task_classification_cnn_roc_prf.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding, BatchNormalization
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
from sklearn import metrics
import dataset
import evaluation
from dataset import Tokenizer
from tfutils import SaveBestModelOnMemory
# from tfx.layers.embeddings import WordEmbeddingInitializer
# Multi-class classification example on THUCNews titles: train a char-level
# CNN, then draw ROC (and PRF) curves for the predictions.
# (For multi-label classification one would use sigmoid outputs and targets
# encoded like [0, 1, 1, 0, 1] instead of softmax one-hot targets.)
# Prepare the data
X, y, categoricals = dataset.load_THUCNews_title_label()
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.7, random_state=732)
num_classes = len(categoricals)
# Convert texts to character-id sequences.
ctokenizer = Tokenizer()
# Strict cross-validation: build the global vocabulary on the training set only.
ctokenizer.fit(X_train)
X_train = ctokenizer.transform(X_train)
X_test = ctokenizer.transform(X_test)
# maxlen = tokenizer.find_best_maxlen(X_train, mode="mean")
maxlen = 48
print("max length is", maxlen)
# Pad/truncate every sequence to a fixed length (post-padding with id 0).
X_train = sequence.pad_sequences(
    X_train,
    maxlen=maxlen,
    dtype="int32",
    padding="post",
    truncating="post",
    value=0)
X_test = sequence.pad_sequences(
    X_test,
    maxlen=maxlen,
    dtype="int32",
    padding="post",
    truncating="post",
    value=0)
# One-hot encode the labels for categorical cross-entropy.
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# Model
input_dim = ctokenizer.vocab_size
# output_dim = tokenizer.find_embedding_dims(input_dim)
output_dim = 128
# wi = WordEmbeddingInitializer(wm.vocab, path="/home/zhiwen/workspace/dataset/word2vec_baike/word2vec_baike")
# input_dim, output_dim = wi.shape
inputs = Input(shape=(maxlen,)) # (batch_size, maxlen)
x = Embedding(input_dim, output_dim,
              embeddings_initializer="glorot_normal",
              input_length=maxlen,
              trainable=True,
              mask_zero=True)(inputs) # (batch_size, maxlen, output_dim)
x = Dropout(0.2)(x)
# Two stacked 1-D convolutions with kernel sizes 2 and 3 act as bigram /
# trigram feature extractors over the character embeddings.
x = Conv1D(filters=200,
           kernel_size=2,
           padding="same",
           activation="relu",
           strides=1)(x)
x = Conv1D(filters=200,
           kernel_size=3,
           padding="same",
           activation="relu",
           strides=1)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(100)(x)
x = Dropout(0.2)(x)
x = Activation("relu")(x)
outputs = Dense(num_classes, activation="softmax")(x)
model = Model(inputs, outputs)
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
# Training
batch_size = 32
epochs = 8
callbacks = [SaveBestModelOnMemory()]
model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          callbacks=callbacks,
          validation_split=0.1)
model.summary()
y_pred = model.predict(X_test)
# Per-class ROC curves: each class column is treated as a one-vs-rest
# binary problem.
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Micro-average: pool every (sample, class) decision into one binary problem.
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Macro-average: interpolate each class's TPR onto a common FPR grid and
# average the curves with equal class weight.
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
lw = 1
colors = itertools.cycle(
    ['aqua', 'darkorange', 'cornflowerblue', 'blue', 'red'])
# NOTE(review): 'linestyles' is defined but never used below.
linestyles = itertools.cycle([''])
for i, color in zip(range(num_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i]))
# Chance-level diagonal for reference.
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC to multi-class')
plt.legend(loc="lower right")
plt.show()
| 27.885542 | 110 | 0.680709 | import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding, BatchNormalization
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
from sklearn import metrics
import dataset
import evaluation
from dataset import Tokenizer
from tfutils import SaveBestModelOnMemory
# from tfx.layers.embeddings import WordEmbeddingInitializer
# classification 中 multi labels 文件
# 多分类绘制ROC、PRF等曲线的例子
# 用sigmoid进行多标签分类
# [0, 1, 1, 0, 1]
# 处理数据
X, y, categoricals = dataset.load_THUCNews_title_label()
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, random_state=732)
num_classes = len(categoricals)
# 转化成字id
ctokenizer = Tokenizer()
# 严格的交叉验证,只在训练集上构建全局词表
ctokenizer.fit(X_train)
X_train = ctokenizer.transform(X_train)
X_test = ctokenizer.transform(X_test)
# maxlen = tokenizer.find_best_maxlen(X_train, mode="mean")
maxlen = 48
print("max length is", maxlen)
X_train = sequence.pad_sequences(
X_train,
maxlen=maxlen,
dtype="int32",
padding="post",
truncating="post",
value=0)
X_test = sequence.pad_sequences(
X_test,
maxlen=maxlen,
dtype="int32",
padding="post",
truncating="post",
value=0)
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# 模型
input_dim = ctokenizer.vocab_size
# output_dim = tokenizer.find_embedding_dims(input_dim)
output_dim = 128
# wi = WordEmbeddingInitializer(wm.vocab, path="/home/zhiwen/workspace/dataset/word2vec_baike/word2vec_baike")
# input_dim, output_dim = wi.shape
inputs = Input(shape=(maxlen,)) # (batch_size, maxlen)
x = Embedding(input_dim, output_dim,
embeddings_initializer="glorot_normal",
input_length=maxlen,
trainable=True,
mask_zero=True)(inputs) # (batch_size, maxlen, output_dim)
x = Dropout(0.2)(x)
x = Conv1D(filters=200,
kernel_size=2,
padding="same",
activation="relu",
strides=1)(x)
x = Conv1D(filters=200,
kernel_size=3,
padding="same",
activation="relu",
strides=1)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(100)(x)
x = Dropout(0.2)(x)
x = Activation("relu")(x)
outputs = Dense(num_classes, activation="softmax")(x)
model = Model(inputs, outputs)
model.compile(loss="categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"])
# 训练
batch_size = 32
epochs = 8
callbacks = [SaveBestModelOnMemory()]
model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_split=0.1)
model.summary()
y_pred = model.predict(X_test)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(num_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(num_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(num_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= num_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
lw = 1
colors = itertools.cycle(
['aqua', 'darkorange', 'cornflowerblue', 'blue', 'red'])
linestyles = itertools.cycle([''])
for i, color in zip(range(num_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC to multi-class')
plt.legend(loc="lower right")
plt.show()
| 0 | 0 | 0 |
5059b12edbc2fec8ad15300670e5c0628bc4149c | 534 | py | Python | botx/clients/types/options.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | botx/clients/types/options.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | botx/clients/types/options.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | """Special options for messages from bot."""
from pydantic import BaseModel
from botx.models.messages.sending.options import NotificationOptions
class ResultOptions(BaseModel):
"""Configuration for command result or notification that is send to BotX API."""
#: send message only when stealth mode is enabled.
stealth_mode: bool = False
#: use in-text mentions
raw_mentions: bool = False
#: message options for configuring notifications.
notification_opts: NotificationOptions = NotificationOptions()
| 29.666667 | 84 | 0.754682 | """Special options for messages from bot."""
from pydantic import BaseModel
from botx.models.messages.sending.options import NotificationOptions
class ResultOptions(BaseModel):
"""Configuration for command result or notification that is send to BotX API."""
#: send message only when stealth mode is enabled.
stealth_mode: bool = False
#: use in-text mentions
raw_mentions: bool = False
#: message options for configuring notifications.
notification_opts: NotificationOptions = NotificationOptions()
| 0 | 0 | 0 |
48ece775f281cd9e1431d40945120826992d65c2 | 2,877 | py | Python | source/reports/orderReports.py | MatheusDiass/BOBs_Pizzaria_Anchieta | b52188cb6411a07b67a76b0e53f3828f9cf0012d | [
"MIT"
] | 2 | 2020-05-23T21:57:29.000Z | 2020-05-23T22:03:06.000Z | source/reports/orderReports.py | MatheusDiass/BOBs_Pizzaria_Anchieta | b52188cb6411a07b67a76b0e53f3828f9cf0012d | [
"MIT"
] | 1 | 2020-05-31T18:15:47.000Z | 2020-05-31T18:15:47.000Z | source/reports/orderReports.py | MatheusDiass/BOBs_Pizzaria_Anchieta | b52188cb6411a07b67a76b0e53f3828f9cf0012d | [
"MIT"
] | null | null | null | # Import sqlite3 para tratar os erros
import _sqlite3
# Importado para formatar a data
from datetime import date, datetime
# Importa a função de relatório de pedidos
from source.db.tblOrder import selectAllOrderInformation, selectAllOrderBetweenDate
# Exibe todos os pedidos
# Exibe todos os pedidos de acordo com o periodo informado | 36.884615 | 93 | 0.572471 | # Import sqlite3 para tratar os erros
import _sqlite3
# Importado para formatar a data
from datetime import date, datetime
# Importa a função de relatório de pedidos
from source.db.tblOrder import selectAllOrderInformation, selectAllOrderBetweenDate
# Exibe todos os pedidos
def allOrderInformationReports():
try:
print('\n--------------------------------------------')
print('Relatório de Pedidos - Todos os Pedidos\n')
listAllOrder = selectAllOrderInformation()
if len(listAllOrder) == 0:
print('Não existem pedidos atuais!\n')
input('Pressione enter para continuar...')
else:
for order in listAllOrder:
# Formata a data
date = datetime.strftime(datetime.strptime(order[1], '%Y-%m-%d'), '%d/%m/%Y')
print('Cod do Pedido:', order[0])
print('Data do Pedido:', date)
print('Nome do Cliente:', order[2])
print('Preço total: {:.2f}'.format(order[3]))
print('\n')
input('Pressione enter para continuar...')
except _sqlite3.OperationalError as error:
print('\nNão foi possivel buscar os clientes')
print('Erro: ', error)
input('\nPressione enter para continuar...')
# Exibe todos os pedidos de acordo com o periodo informado
def allOrderBetweenDateReports():
try:
print('\nExemplo de data: 28/09/2010\n')
staDate = str(input('Digite a data de inicio: '))
endDate = str(input('Digite a data de fim: '))
# Formata a data
staDate = datetime.strptime(staDate, '%d/%m/%Y').date()
endDate = datetime.strptime(endDate, '%d/%m/%Y').date()
print('\n--------------------------------------------')
print('Relatório de Pedidos - Pedidos por Período\n')
listAllOrderBetweenDate = selectAllOrderBetweenDate(str(staDate), str(endDate))
if len(listAllOrderBetweenDate) == 0:
print('Não existem pedidos atuais!\n')
else:
for order in listAllOrderBetweenDate:
# Formata a data
date = datetime.strftime(datetime.strptime(order[1], '%Y-%m-%d'), '%d/%m/%Y')
print('Cod do Pedido:', order[0])
print('Data do Pedido:', date)
print('Nome do Cliente:', order[2])
print('Preço total: {:.2f}'.format(order[3]))
print('\n')
input('Pressione enter para continuar...')
except ValueError as error:
print('\nNão foi possivel buscar os pedidos')
print('Erro: ', error)
input('\nPressione enter para continuar...')
except _sqlite3.OperationalError as error:
print('\nNão foi possivel buscar os pedidos')
print('Erro: ', error)
input('\nPressione enter para continuar...') | 2,506 | 0 | 44 |
c16d2c47c38fca8e0f6ebf2f386d6e9af8743901 | 596 | py | Python | pa/db/bank.py | sannidhiteredesai/PersonalAccountant | 5609ad979edc690604eee5131c034029e595ccde | [
"MIT"
] | 3 | 2018-08-05T15:29:16.000Z | 2019-05-23T18:09:42.000Z | pa/db/bank.py | sannidhiteredesai/PersonalAccountant | 5609ad979edc690604eee5131c034029e595ccde | [
"MIT"
] | null | null | null | pa/db/bank.py | sannidhiteredesai/PersonalAccountant | 5609ad979edc690604eee5131c034029e595ccde | [
"MIT"
] | null | null | null | from tinydb import Query, where
from pa import get_db
from pa.config import Config
| 29.8 | 67 | 0.629195 | from tinydb import Query, where
from pa import get_db
from pa.config import Config
class BankDB:
def __init__(self, config=Config):
self.db = get_db(config).table('banks')
def add(self, new_bank):
self.db.insert(new_bank)
def get_all_banks(self, for_user):
return self.db.search(Query().username == for_user)
def delete_bank_branch(self, bank_name, bank_branch, username):
self.db.remove((where('bank_name') == bank_name) &
(where('bank_branch') == bank_branch) &
(where('username') == username))
| 390 | -8 | 130 |
2c65584e8066d874578c2f9877a23e7292123209 | 1,480 | py | Python | files/029 - distinct powers.py | farukara/Project-Euler-problems | 806fdbd797edd9929728b43cc428a55df50e1c01 | [
"MIT"
] | null | null | null | files/029 - distinct powers.py | farukara/Project-Euler-problems | 806fdbd797edd9929728b43cc428a55df50e1c01 | [
"MIT"
] | null | null | null | files/029 - distinct powers.py | farukara/Project-Euler-problems | 806fdbd797edd9929728b43cc428a55df50e1c01 | [
"MIT"
] | null | null | null | #!python3
# coding: utf-8
# Consider all integer combinations of ab for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
#
# 22=4, 23=8, 24=16, 25=32
# 32=9, 33=27, 34=81, 35=243
# 42=16, 43=64, 44=256, 45=1024
# 52=25, 53=125, 54=625, 55=3125
# If they are then placed in numerical order, with any repeats removed, we get the following sequence of 15 distinct terms:
#
# 4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
#
# How many distinct terms are in the sequence generated by ab for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
#https://projecteuler.net/problem=29
from time import perf_counter
import matplotlib.pyplot as plt
from math import log
yset = []
ylist = []
xline = []
i = 1
while i < 101:
start = perf_counter()
using_set(i)
end = perf_counter()
yset.append(end - start)
xline.append(i)
start = perf_counter()
using_list(i)
end = perf_counter()
ylist.append(end-start)
i += (i+int(log(i)))
print(i)
plt.plot(xline, yset, label="set")
plt.plot(xline, ylist, label="list")
plt.xlabel("number of items")
plt.ylabel("time (seconds)")
plt.title("Set vs List time performance")
plt.legend()
plt.show()
| 24.666667 | 123 | 0.608784 | #!python3
# coding: utf-8
# Consider all integer combinations of ab for 2 ≤ a ≤ 5 and 2 ≤ b ≤ 5:
#
# 22=4, 23=8, 24=16, 25=32
# 32=9, 33=27, 34=81, 35=243
# 42=16, 43=64, 44=256, 45=1024
# 52=25, 53=125, 54=625, 55=3125
# If they are then placed in numerical order, with any repeats removed, we get the following sequence of 15 distinct terms:
#
# 4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
#
# How many distinct terms are in the sequence generated by ab for 2 ≤ a ≤ 100 and 2 ≤ b ≤ 100?
#https://projecteuler.net/problem=29
from time import perf_counter
import matplotlib.pyplot as plt
from math import log
def using_set(limit):
    """Count distinct values of a**b for 2 <= a, b < limit using a set.

    Project Euler #29 asks for limit=101 (i.e. 2 <= a, b <= 100).  The
    original version computed the set and discarded it (the print was
    commented out); returning the count makes the result usable and
    testable while remaining backward-compatible (callers that ignored
    the previous None return are unaffected).
    """
    seq = set()
    for a in range(2, limit):
        for b in range(2, limit):
            seq.add(a ** b)
    return len(seq)
def using_list(limit):
    """Count distinct values of a**b for 2 <= a, b < limit using a list.

    Functionally identical to using_set(); deliberately kept list-based
    (O(n) membership test) because the script benchmarks list vs. set
    performance.  Returns the number of distinct terms, matching
    using_set(); callers that ignored the previous None return are
    unaffected.
    """
    found = []
    for a in range(2, limit):
        for b in range(2, limit):
            value = a ** b
            if value not in found:
                found.append(value)
    return len(found)
yset = []   # per-limit timings for the set implementation
ylist = []  # per-limit timings for the list implementation
xline = []  # the limits that were benchmarked
i = 1
# Benchmark both implementations at roughly geometrically growing limits up
# to 100: the step i += i + int(log(i)) roughly doubles i each pass
# (1, 2, 4, 9, 20, 42, 87).
while i < 101:
    start = perf_counter()
    using_set(i)
    end = perf_counter()
    yset.append(end - start)
    xline.append(i)
    start = perf_counter()
    using_list(i)
    end = perf_counter()
    ylist.append(end-start)
    i += (i+int(log(i)))
    print(i)
# Plot both timing series against the benchmarked limits.
plt.plot(xline, yset, label="set")
plt.plot(xline, ylist, label="list")
plt.xlabel("number of items")
plt.ylabel("time (seconds)")
plt.title("Set vs List time performance")
plt.legend()
plt.show()
| 295 | 0 | 46 |
810cfa4617bed038850f4e916bdda3f059ac8f5c | 2,029 | py | Python | lib/dawet.py | riandakarizal/ITeung | 2d3fc7e4974c9a9b67ff61f2a77a528988b55820 | [
"MIT"
] | null | null | null | lib/dawet.py | riandakarizal/ITeung | 2d3fc7e4974c9a9b67ff61f2a77a528988b55820 | [
"MIT"
] | 37 | 2020-03-22T23:21:14.000Z | 2020-09-16T15:07:06.000Z | lib/dawet.py | riandakarizal/ITeung | 2d3fc7e4974c9a9b67ff61f2a77a528988b55820 | [
"MIT"
] | 1 | 2020-09-08T11:31:30.000Z | 2020-09-08T11:31:30.000Z | import gspread
import time
from oauth2client.service_account import ServiceAccountCredentials | 37.574074 | 178 | 0.584524 | import gspread
import time
from oauth2client.service_account import ServiceAccountCredentials
class Dawet(object):
    """Thin wrapper around a Google Sheets spreadsheet accessed via gspread.

    Credentials are read from 'client_secrets.json' in the working
    directory.  Read/write helpers retry every 10 seconds on API errors
    (e.g. rate limiting), so calls may block for a long time.
    """

    def __init__(self, filename):
        # 'filename' is the spreadsheet title as shown in Google Drive.
        self.filename = filename
        self.opendb()

    def opendb(self):
        """Authorize against the Sheets/Drive APIs and open the spreadsheet."""
        scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
        creds = ServiceAccountCredentials.from_json_keyfile_name('client_secrets.json', scope)
        client = gspread.authorize(creds)
        self.sheet = client.open(self.filename)

    def getAllData(self, sheetnum):
        """Return all cell values of worksheet 'sheetnum' as a list of rows."""
        list_value = self.sheet.get_worksheet(sheetnum).get_all_values()
        return list_value

    def getData(self, rowname, colname, sheetnum):
        """Return the value of the cell at the row labelled 'rowname' and the
        column labelled 'colname', retrying on transient API errors.

        Returns "not_found" when the row label is missing and
        "pertemuan_not_found" when the column label is missing.
        """
        dataError = True
        while dataError:
            try:
                ambil = self.sheet.get_worksheet(sheetnum).cell(self.sheet.get_worksheet(sheetnum).find(rowname).row, self.sheet.get_worksheet(sheetnum).find(colname).col).value
                print(colname + " selesai")
                dataError = False
                return ambil
            except Exception as e:
                print(e)
                # Assumes the not-found exception stringifies to the missing
                # label (gspread's CellNotFound did in older versions) --
                # fragile; TODO(review): match the exception type instead.
                if str(e) == rowname:
                    dataError = False
                    return "not_found"
                elif str(e) == colname:
                    dataError = False
                    return "pertemuan_not_found"
                else:
                    # Anything else (quota, network) -> back off and retry.
                    print("wait ...")
                    time.sleep(10)
                    dataError = True

    def setData(self, rowname, colname, sheetnum, content):
        """Write 'content' into the cell addressed by row/column labels."""
        setv = self.sheet.get_worksheet(sheetnum).update_cell(self.sheet.get_worksheet(sheetnum).find(rowname).row, self.sheet.get_worksheet(sheetnum).find(colname).col, content)
        return setv

    def setRowData(self, data):
        """Insert 'data' as a new row at position 2 of the first worksheet,
        retrying every 10 seconds until the API accepts it.
        """
        dataLimit = True
        while dataLimit:
            try:
                self.sheet.get_worksheet(0).insert_row(data, 2)
                dataLimit = False
            except Exception:
                # Fix: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit and made this retry loop
                # impossible to interrupt from the keyboard.
                time.sleep(10)
                dataLimit = True
d6fffbc360e40f3fa206c22120a91856dbfad8a1 | 167 | py | Python | build.py | findgriffin/quotesforclare | 5433e9b3b3a8b42133069ff91a902ff0d53cf0da | [
"MIT"
] | null | null | null | build.py | findgriffin/quotesforclare | 5433e9b3b3a8b42133069ff91a902ff0d53cf0da | [
"MIT"
] | null | null | null | build.py | findgriffin/quotesforclare | 5433e9b3b3a8b42133069ff91a902ff0d53cf0da | [
"MIT"
] | null | null | null | import markdown
with open("index.md", 'r') as md:
output = markdown.markdown(md.read())
with open("public/index.html", 'w') as out:
out.write(output)
| 23.857143 | 47 | 0.628743 | import markdown
with open("index.md", 'r') as md:
output = markdown.markdown(md.read())
with open("public/index.html", 'w') as out:
out.write(output)
| 0 | 0 | 0 |
b7c908fd071d0ff5c30f1ca4c527eb71f4aa62d3 | 10,342 | py | Python | MCFundamental.py | aaleti/NeighboursSimilarFitness | bf087bfb8e77c79f085388fccf8aa63088f8d610 | [
"Unlicense"
] | null | null | null | MCFundamental.py | aaleti/NeighboursSimilarFitness | bf087bfb8e77c79f085388fccf8aa63088f8d610 | [
"Unlicense"
] | null | null | null | MCFundamental.py | aaleti/NeighboursSimilarFitness | bf087bfb8e77c79f085388fccf8aa63088f8d610 | [
"Unlicense"
] | null | null | null | from numpy.linalg import inv
import numpy as np
import pykov
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from itertools import cycle
import matplotlib
from matplotlib.pyplot import *
import brewer2mpl
import seaborn as sns
from scipy.stats import ks_2samp
from scipy.stats import mode
import pandas as pd
# Experiment configuration: instance sizes and the two problem variants
# (neighbours-similar-fitness vs. not).
files = ["3","4","5","6","7","8","9"]
Ptype=["nsf","no_nsf"]
data=[]          # one row per run: [N, PType, Probability, Steps, NGlobal]
datafitnesses=[] # one row per solution: [N, PType, Fitness]
for fNumber in files:
    # Instance "9" only has 100 recorded runs; the others have 1000.
    iterations=0
    if(fNumber=="9"):
        iterations=100
    else:
        iterations=1000
    for ptype in Ptype:
        for i in range(iterations):
            maxF=0
            with open("local-search-july-2017/"+ptype+fNumber) as f:
                lines = f.readlines()
            #reading the files
            for k in range(0, len(lines)):
                line = lines[k]
                if(str(i)+") - gen" in line):
                    # The fitness listing sits 3 lines below this header.
                    # (Reassigning k only affects this iteration: range()
                    # keeps its own counter.)
                    k=k+3
                    line = lines[k]
                    linex = line.split(",")
                    fitnesses = []
                    #reading the search space
                    #1 - 0, 2 - 3, 3 - 0, 4 - 3, 5 - 3, 6 - 1, 7 - 0, 8 - 1
                    for item in linex:
                        itemx = item.split("-")
                        fitnesses.append(float(itemx[1]))
                        fdata=[]
                        fdata.append(fNumber)
                        fdata.append(ptype)
                        fdata.append(float(itemx[1]))
                        datafitnesses.append(fdata)
                    #calculation of good enough fitness: halfway between the
                    #modal fitness and the maximum fitness.
                    modeF=mode(fitnesses)
                    maxF=max(fitnesses)
                    minF=min(fitnesses)
                    vge=modeF[0]+(maxF-modeF[0])/2
                #reading the transition probabilities
                if("it("+str(i)+");" in line):
                    s1=line.split(" ")
                    mSize=int(s1[1])
                    P= np.array([]).reshape(0,mSize)
                    for j in range(mSize):
                        line = lines[k+j+1]
                        line=line.rstrip()
                        row = line.split(" ")
                        a = np.array([])
                        for item in row:
                            itt = float(item)
                            a = np.append(a, itt)
                        P = np.vstack([P,a])
                    lenP=len(P)
                    rm= []
                    nvge=[]
                    allRm=[]
                    listS=[]
                    #Find absorbing states and optima
                    for j in range(lenP):
                        flag=0
                        ff = 0
                        for s in range(lenP):
                            # if there are no outgoing probabilities, then this is a local/global optimum.
                            if(P[j,s]>0):
                                ff = 1
                            if(j not in listS and s not in listS):
                                # plateoux of two solutions
                                if(P[j,s]==1.0 and P[s,j]==1.0):
                                    flag=1
                                    listS.append(j)
                                # absorbing state
                                if(P[j,s]==1.0 and j==s):
                                    flag=1
                                    listS.append(j)
                                for k in range(lenP):
                                    if(k not in listS):
                                        # plateoux of three solutions
                                        if(P[j,s]==1.0 and P[s,k]==1.0 and P[k,j]==1.0):
                                            flag=1
                                            listS.append(j)
                                        # plateoux of four solutions
                                        if(P[j,s]==1.0 and P[s,j]>0 and P[s,k]>0 and (P[s,j]+P[s,k])==1.0 and P[k,s]==1.0):
                                            flag=1
                                            listS.append(j)
                                        if(P[j,s]==1.0 and P[s,j]>0 and P[s,k]>0 and (P[s,j]+P[s,k])==1.0 and P[k,j]==1.0):
                                            flag=1
                                            listS.append(j)
                        # list that keep track of absorbing states and local/global optima
                        if(flag==1 or ff==0):
                            rm.append(j)
                            allRm.append(j)
                        if(fitnesses[j]<vge):
                            nvge.append(j)
                            allRm.append(j)
                    keptFitnesses = []
                    removedFitnesses = []
                    nvgeFitnesses = []
                    keep=[]
                    for j in range(lenP):
                        if(j in nvge):
                            nvgeFitnesses.append(fitnesses[j])
                        if(j not in rm and j not in nvge):
                            keptFitnesses.append(fitnesses[j])
                            keep.append(j)
                        if(j in rm):
                            removedFitnesses.append(fitnesses[j])
                    R=np.zeros((len(keep),len(rm)), dtype='float')
                    #create a vector of 1s for calculating number of visits
                    mat1=[]
                    # canonical representation by removing absorbing states and local
                    for j in range(len(keep)):
                        mat1.append(1)
                        for s in range(len(rm)):
                            R[j,s]=P[keep[j],rm[s]]
                    #removing
                    P=np.delete(P, allRm, axis=1)
                    P=np.delete(P, allRm, axis=0)
                    sm=0.0
                    sb=0.0
                    try:
                        if(len(P)>0):
                            iM=np.identity(len(P))
                            mM=iM-P
                            # Fundamental matrix
                            N = inv(mM)
                            # probability of reaching an absorbing state from any point
                            M=np.dot(N,R)
                            # expected number of steps to absorbion from any state
                            B=np.dot(N,mat1)
                            colsM = M.shape[1]
                            nrows=N.shape[0]
                            # calculating the probability of reaching a global optima
                            globalC=0
                            for j in range(colsM):
                                # if the absorbing state or optimum is a global optimum
                                if(removedFitnesses[j]==maxF):
                                    globalC=globalC+1
                                    sumTemp=sum(row[j] for row in M)
                                    avgTemp=sumTemp/nrows
                                    sm=sm+avgTemp
                            sm=sm/globalC
                            '''
                            colsN = N.shape[1]
                            for j in range(colsN):
                                if(keptFitnesses[j]==max):
                                    tempf=0
                                    for s in range(colsM):
                                        if(M[j,s]>0.0):
                                            tempf=1
                                    if(tempf==0):
                                        sumTemp=sum(row[j] for row in N)
                                        avgTemp=sumTemp/nrows
                                        if(avgTemp>=1.0):
                                            avgTemp=1.0
                                        sm=sm+avgTemp
                            '''
                        else:
                            countO=0
                            colsR = R.shape[1]
                            for j in range(colsR):
                                # if the absorbing state or optimum is a global optimum
                                if(removedFitnesses[j]==maxF):
                                    countO=countO+1
                            sm=countO/colsR
                        # NOTE(review): when len(P)==0, B is never assigned on
                        # this pass -- the next line then raises NameError
                        # (caught below) or silently reuses B from a previous
                        # iteration.  Confirm which behaviour is intended.
                        nrows=B.shape[0]
                        globalC=0
                        for j in range(nrows):
                            # NOTE(review): this indexes removedFitnesses with
                            # positions taken from B (the kept states) --
                            # verify the intended pairing.
                            if(removedFitnesses[j]==maxF):
                                globalC=globalC+1
                                sb=sb+B[j]
                        sb=sb/globalC
                        recD=[]
                        recD.append(fNumber)
                        recD.append(ptype)
                        #probability reaching global optimum
                        recD.append(sm)
                        #number of steps
                        recD.append(sb)
                        recD.append(globalC)
                        data.append(recD)
                    except Exception as e:
                        # Fix: was a bare 'except:' that hid the failure cause
                        # (e.g. a singular I-P matrix in inv()) and also
                        # trapped KeyboardInterrupt; report what happened
                        # instead of swallowing it.
                        print("error"+fNumber, e)
# Persist the aggregated results as CSV.  (The box plots mentioned in the
# original comment are produced downstream from these two files.)
df = pd.DataFrame(data, columns=["N","PType","Probability","Steps","NGlobal"])
df.to_csv("MCresults.csv")
df2 = pd.DataFrame(datafitnesses, columns=["N","PType","Fitness"])
df2.to_csv("MCfitnesses.csv")
| 47.009091 | 127 | 0.314929 | from numpy.linalg import inv
import numpy as np
import pykov
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from itertools import cycle
import matplotlib
from matplotlib.pyplot import *
import brewer2mpl
import seaborn as sns
from scipy.stats import ks_2samp
from scipy.stats import mode
import pandas as pd
files = ["3","4","5","6","7","8","9"]
Ptype=["nsf","no_nsf"]
data=[]
datafitnesses=[]
for fNumber in files:
iterations=0
if(fNumber=="9"):
iterations=100
else:
iterations=1000
for ptype in Ptype:
for i in range(iterations):
maxF=0
with open("local-search-july-2017/"+ptype+fNumber) as f:
lines = f.readlines()
#reading the files
for k in range(0, len(lines)):
line = lines[k]
if(str(i)+") - gen" in line):
k=k+3
line = lines[k]
linex = line.split(",")
fitnesses = []
#reading the search space
#1 - 0, 2 - 3, 3 - 0, 4 - 3, 5 - 3, 6 - 1, 7 - 0, 8 - 1
for item in linex:
itemx = item.split("-")
fitnesses.append(float(itemx[1]))
fdata=[]
fdata.append(fNumber)
fdata.append(ptype)
fdata.append(float(itemx[1]))
datafitnesses.append(fdata)
#calculation of good enough fitness
modeF=mode(fitnesses)
maxF=max(fitnesses)
minF=min(fitnesses)
vge=modeF[0]+(maxF-modeF[0])/2
#reading the transition probabilities
if("it("+str(i)+");" in line):
s1=line.split(" ")
mSize=int(s1[1])
P= np.array([]).reshape(0,mSize)
for j in range(mSize):
line = lines[k+j+1]
line=line.rstrip()
row = line.split(" ")
a = np.array([])
for item in row:
itt = float(item)
a = np.append(a, itt)
P = np.vstack([P,a])
lenP=len(P)
rm= []
nvge=[]
allRm=[]
listS=[]
#Find absorbing states and optima
for j in range(lenP):
flag=0
ff = 0
for s in range(lenP):
# if there are no outgoing probabilities, then this is a local/global optimum.
if(P[j,s]>0):
ff = 1
if(j not in listS and s not in listS):
# plateoux of two solutions
if(P[j,s]==1.0 and P[s,j]==1.0):
flag=1
listS.append(j)
# absorbing state
if(P[j,s]==1.0 and j==s):
flag=1
listS.append(j)
for k in range(lenP):
if(k not in listS):
# plateoux of three solutions
if(P[j,s]==1.0 and P[s,k]==1.0 and P[k,j]==1.0):
flag=1
listS.append(j)
# plateoux of four solutions
if(P[j,s]==1.0 and P[s,j]>0 and P[s,k]>0 and (P[s,j]+P[s,k])==1.0 and P[k,s]==1.0):
flag=1
listS.append(j)
if(P[j,s]==1.0 and P[s,j]>0 and P[s,k]>0 and (P[s,j]+P[s,k])==1.0 and P[k,j]==1.0):
flag=1
listS.append(j)
# list that keep track of absorbing states and local/global optima
if(flag==1 or ff==0):
rm.append(j)
allRm.append(j)
if(fitnesses[j]<vge):
nvge.append(j)
allRm.append(j)
keptFitnesses = []
removedFitnesses = []
nvgeFitnesses = []
keep=[]
for j in range(lenP):
if(j in nvge):
nvgeFitnesses.append(fitnesses[j])
if(j not in rm and j not in nvge):
keptFitnesses.append(fitnesses[j])
keep.append(j)
if(j in rm):
removedFitnesses.append(fitnesses[j])
R=np.zeros((len(keep),len(rm)), dtype='float')
#create a vector of 1s for calculating number of visits
mat1=[]
# canonical representation by removing absorbing states and local
for j in range(len(keep)):
mat1.append(1)
for s in range(len(rm)):
R[j,s]=P[keep[j],rm[s]]
#removing
P=np.delete(P, allRm, axis=1)
P=np.delete(P, allRm, axis=0)
sm=0.0
sb=0.0
try:
if(len(P)>0):
iM=np.identity(len(P))
mM=iM-P
# Fundamental matrix
N = inv(mM)
# probability of reaching an absorbing state from any point
M=np.dot(N,R)
# expected number of steps to absorbion from any state
B=np.dot(N,mat1)
colsM = M.shape[1]
nrows=N.shape[0]
# calculating the probability of reaching a global optima
globalC=0
for j in range(colsM):
# if the absorbing state or optimum is a global optimum
if(removedFitnesses[j]==maxF):
globalC=globalC+1
sumTemp=sum(row[j] for row in M)
avgTemp=sumTemp/nrows
sm=sm+avgTemp
sm=sm/globalC
'''
colsN = N.shape[1]
for j in range(colsN):
if(keptFitnesses[j]==max):
tempf=0
for s in range(colsM):
if(M[j,s]>0.0):
tempf=1
if(tempf==0):
sumTemp=sum(row[j] for row in N)
avgTemp=sumTemp/nrows
if(avgTemp>=1.0):
avgTemp=1.0
sm=sm+avgTemp
'''
else:
countO=0
colsR = R.shape[1]
for j in range(colsR):
# if the absorbing state or optimum is a global optimum
if(removedFitnesses[j]==maxF):
countO=countO+1
sm=countO/colsR
nrows=B.shape[0]
globalC=0
for j in range(nrows):
if(removedFitnesses[j]==maxF):
globalC=globalC+1
sb=sb+B[j]
sb=sb/globalC
recD=[]
recD.append(fNumber)
recD.append(ptype)
#probability reaching global optimum
recD.append(sm)
#number of steps
recD.append(sb)
recD.append(globalC)
data.append(recD)
except:
print("error"+fNumber)
# drawing the boxplots
df = pd.DataFrame(data, columns=["N","PType","Probability","Steps","NGlobal"])
df.to_csv("MCresults.csv")
df2 = pd.DataFrame(datafitnesses, columns=["N","PType","Fitness"])
df2.to_csv("MCfitnesses.csv")
| 0 | 0 | 0 |
ddf6603ab028bfe68570d800eb542a92f989c9b8 | 6,947 | py | Python | goal-depth-detection-host/main.py | jonathandao0/depthai-frc | 9f1b4fc9e049f252e5f8fc53da02b9ed43d80b5a | [
"MIT"
] | 3 | 2021-11-23T17:00:55.000Z | 2022-02-17T20:23:50.000Z | goal-depth-detection-host/main.py | jonathandao0/depthai-frc | 9f1b4fc9e049f252e5f8fc53da02b9ed43d80b5a | [
"MIT"
] | null | null | null | goal-depth-detection-host/main.py | jonathandao0/depthai-frc | 9f1b4fc9e049f252e5f8fc53da02b9ed43d80b5a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import cv2
import depthai as dai
import socket
from pipelines import goal_edge_depth_detection
import logging
from common import target_finder
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
if __name__ == '__main__':
log.info("Starting goal-depth-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
| 37.349462 | 137 | 0.57622 | #!/usr/bin/env python3
import argparse
import cv2
import depthai as dai
import socket
from pipelines import goal_edge_depth_detection
import logging
from common import target_finder
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
    """Headless goal-detection host.

    Runs the OAK-D depth pipeline, refines the detected upper power-port
    target via contour search, publishes targeting data to NetworkTables
    and serves the annotated edge frame over an MJPEG stream.
    """
    def __init__(self):
        log.info("Connected Devices:")
        for device in dai.Device.getAllAvailableDevices():
            log.info(f"{device.getMxId()} {device.state}")
        self.init_networktables()
        # Discover this host's LAN IP by "connecting" a UDP socket (no packet
        # is actually sent); fall back to localhost when the network is down.
        # NOTE(review): bare except also swallows KeyboardInterrupt, and the
        # socket is never closed — consider narrowing to OSError + close().
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            ip_address = s.getsockname()[0]
        except:
            ip_address = 'localhost'
        port = 5801
        # Static registry of the single camera this host manages, keyed by
        # its DepthAI MX device id.
        self.device_list = {"OAK-D_Goal": {
            'name': "OAK-D_Goal",
            'id': "14442C10218CCCD200",
            'stream_address': "{}:{}".format(ip_address, port),
            'nt_tab': NetworkTables.getTable("OAK-D_Goal")
        }}
        self.object_pipeline, self.labels = goal_edge_depth_detection.create_pipeline("infiniteRecharge2021")
        self.oak_d_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port, colorspace='BW')
        self.fps = FPSHandler()
    def parse_goal_frame(self, frame, edgeFrame, bboxes):
        """Annotate edgeFrame with the detected goal, publish targeting data
        (tv/tx/tz/target_label) to NetworkTables and push the annotated frame
        to the MJPEG stream.

        Returns the (frame, edgeFrame, bboxes) triple; edgeFrame and the
        matching bbox dicts are mutated in place.
        """
        valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
        nt_tab = self.device_list['OAK-D_Goal']['nt_tab']
        if len(bboxes) == 0:
            # No detections at all: clear the target-visible flag.
            nt_tab.putString("target_label", "None")
            nt_tab.putNumber("tv", 0)
        else:
            for bbox in bboxes:
                target_label = self.labels[bbox['label']]
                if target_label not in valid_labels:
                    continue
                # Refine the NN bounding box with a contour search on the
                # edge image; (-999, -999) signals "no contour found".
                edgeFrame, target_x, target_y = target_finder.find_largest_contour(edgeFrame, bbox)
                if target_x == -999 or target_y == -999:
                    log.error("Error: Could not find target contour")
                    continue
                # Horizontal angle from image center, scaled by camera FOV.
                # NOTE(review): NN_IMG_SIZE is neither defined nor imported
                # anywhere in this file — as written this line raises
                # NameError at runtime; confirm where the constant should
                # come from (e.g. common/config).
                angle_offset = (target_x - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
                if abs(angle_offset) > 30:
                    log.info("Invalid angle offset. Setting it to 0")
                    nt_tab.putNumber("tv", 0)
                    angle_offset = 0
                else:
                    log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
                    nt_tab.putNumber("tv", 1)
                # Published even when tv was set to 0 above (angle forced to 0).
                nt_tab.putString("target_label", target_label)
                nt_tab.putNumber("tx", angle_offset)
                nt_tab.putNumber("tz", bbox['depth_z'])
                cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']),
                              (255, 255, 255), 2)
                cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128),
                           thickness=-1)
                # Stash refined values on the bbox for subclasses (MainDebug).
                bbox['target_x'] = target_x
                bbox['target_y'] = target_y
                bbox['angle_offset'] = angle_offset
        self.fps.next_iter()
        cv2.putText(edgeFrame, "{:.2f}".format(self.fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        self.oak_d_stream.send_frame(edgeFrame)
        return frame, edgeFrame, bboxes
    def init_networktables(self):
        """Connect to NetworkTables: try the team client first, then a list
        of fallback addresses. Returns True when connected, else False."""
        NetworkTables.startClientTeam(4201)
        if not NetworkTables.isConnected():
            log.info("Could not connect to team client. Trying other addresses...")
            NetworkTables.startClient([
                '10.42.1.2',
                '127.0.0.1',
                '10.0.0.2',
                '192.168.100.108'
            ])
        if NetworkTables.isConnected():
            log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
            return True
        else:
            log.error("Could not connect to NetworkTables. Restarting server...")
            return False
    def run(self):
        """Main loop: locate the camera by MX id, publish its status/stream
        address, and feed every captured frame through parse_goal_frame."""
        log.info("Setup complete, parsing frames...")
        try:
            found, device_info = dai.Device.getDeviceByMxId(self.device_list['OAK-D_Goal']['id'])
            self.device_list['OAK-D_Goal']['nt_tab'].putBoolean("OAK-D Goal Status", found)
            if found:
                self.device_list['OAK-D_Goal']['nt_tab'].putString("OAK-D_Goal Stream", self.device_list['OAK-D_Goal']['stream_address'])
            for frame, edgeFrame, bboxes in goal_edge_depth_detection.capture(device_info):
                self.parse_goal_frame(frame, edgeFrame, bboxes)
        finally:
            log.info("Exiting Program...")
class MainDebug(Main):
    """Debug variant of Main: additionally renders annotated OpenCV windows
    showing the edge frame and the raw frame, with per-detection stats."""
    def __init__(self):
        super().__init__()
    def parse_goal_frame(self, frame, edgeFrame, bboxes):
        """Run the normal pipeline, then overlay per-box debug text and show
        both frames in OpenCV windows. Pressing 'q' stops the capture loop
        by raising StopIteration."""
        frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
        valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
        for box in bboxes:
            label_name = self.labels[box['label']]
            if label_name not in valid_labels:
                continue
            # Values stashed by the parent; default to 0 when the contour
            # search did not run for this box.
            contour_x = box.get('target_x', 0)
            offset_deg = box.get('angle_offset', 0)
            overlay_rows = [
                "x: {}".format(round(contour_x, 2)),
                "y: {}".format(round(box['y_mid'], 2)),
                "z: {}".format(round(box['depth_z'], 2)),
                "angle: {}".format(round(offset_deg, 3)),
                "conf: {}".format(round(box['confidence'], 2)),
                "label: {}".format(label_name),
            ]
            # Stack the rows 20px apart, starting 30px below the box top.
            for row_idx, text in enumerate(overlay_rows):
                cv2.putText(edgeFrame, text, (box['x_min'], box['y_min'] + 30 + 20 * row_idx),
                            cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
        cv2.imshow("OAK-D Edge", edgeFrame)
        cv2.imshow("OAK-D", frame)
        if cv2.waitKey(1) == ord("q"):
            raise StopIteration()
# Entry point: run the debug UI (OpenCV windows) when -d was passed,
# otherwise run the headless pipeline.
if __name__ == '__main__':
    log.info("Starting goal-depth-detection-host")
    if args.debug:
        MainDebug().run()
    else:
        Main().run()
| 6,078 | -9 | 208 |
0a7c1e873279235adff584e3ef7cbd71c9b3326c | 2,020 | py | Python | app/repositories/cardRepository.py | faradayyg/card-token-generator | 272907e086b12ebed580fe018dae7152ee94dcf7 | [
"MIT"
] | null | null | null | app/repositories/cardRepository.py | faradayyg/card-token-generator | 272907e086b12ebed580fe018dae7152ee94dcf7 | [
"MIT"
] | null | null | null | app/repositories/cardRepository.py | faradayyg/card-token-generator | 272907e086b12ebed580fe018dae7152ee94dcf7 | [
"MIT"
] | null | null | null | from Crypto.Cipher import AES
import base64, hashlib, json
from app.services import payment
from app.models import Vault
from app.utils import further_processing, standardize_response
| 38.113208 | 92 | 0.64703 | from Crypto.Cipher import AES
import base64, hashlib, json
from app.services import payment
from app.models import Vault
from app.utils import further_processing, standardize_response
class CardRepo:
    """Repository that tokenizes card numbers with per-user AES-CBC
    encryption and charges stored cards through a payment gateway."""
    gateway = 'briantree'
    available_gateways = ['stripe', 'briantree']
    def __init__(self, gateway = None):
        # Keep the class default unless a known gateway was explicitly asked for.
        if gateway is not None and gateway in self.available_gateways:
            self.gateway = gateway
    def _build_cipher(self, user):
        # 32-byte AES key: MD5 digest of the user's key, extended with its
        # own first 16 bytes; IV comes from the user record.
        digest = hashlib.md5(user.encryption_key.encode("utf-8")).digest()
        full_key = digest + digest[0:16]
        return AES.new(full_key, AES.MODE_CBC, user.iv_string)
    def create_token(self, user, card_number):
        """Pad *card_number* (PKCS#7-style), encrypt it for *user* and
        return the base64-encoded ciphertext."""
        block = 16
        pad_len = block - len(card_number) % block
        padded = card_number + chr(pad_len) * pad_len
        ciphertext = self._build_cipher(user).encrypt(padded)
        return base64.b64encode(ciphertext).decode('utf-8')
    def decode_token(self, user, token):
        """Decrypt a base64 *token* for *user*; returns everything up to and
        including the final '}', which also strips the padding."""
        plain = self._build_cipher(user).decrypt(base64.b64decode(token)).decode("utf-8")
        return plain[:plain.rfind('}') + 1]
    def pay(self, data, user):
        """Charge the vaulted card referenced by data['token'] through the
        configured gateway and normalize the gateway's response."""
        gateway_impls = {
            'briantree': payment.Briantree(),
            'stripe': payment.Stripe()
        }
        vault = Vault.query.filter_by(user_id=user.id).filter_by(uuid=data['token']).first()
        data['card'] = json.loads(self.decode_token(user, vault.card_token))
        raw_status = gateway_impls[self.gateway].pay(data)
        outcome = standardize_response(self.gateway, raw_status)
        if outcome == True:
            return {"status": "success", "message": "charge successful"}
        elif outcome == False:
            return {"status": "error", "message": "charge failure"}, 500
        else:
            # do further processing on the transaction
            return further_processing(self.gateway, outcome)
| 1,636 | 177 | 23 |
9a28ad71df48836ad852013ab4decef7364b3e68 | 2,513 | py | Python | ruspy/test/estimation_tests/test_estimation.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | 13 | 2019-09-10T12:00:16.000Z | 2022-03-19T13:30:12.000Z | ruspy/test/estimation_tests/test_estimation.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | 45 | 2019-02-17T19:39:00.000Z | 2021-08-23T17:38:40.000Z | ruspy/test/estimation_tests/test_estimation.py | MaxBlesch/ruspy | 5e7fb9e584c7e0d4935f4669e108bbf4e05209c6 | [
"MIT"
] | 9 | 2019-05-03T03:48:37.000Z | 2022-03-19T13:30:13.000Z | """
This module contains unit tests, for the most important functions of
ruspy.estimation.estimation_cost_parameters. The values to compare the results with
are saved in resources/estimation_test. The setting of the test is documented in the
inputs section in test module.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.choice_probabilities import choice_prob_gumbel
from ruspy.model_code.cost_functions import calc_obs_costs
from ruspy.model_code.cost_functions import lin_cost
from ruspy.model_code.fix_point_alg import calc_fixp
from ruspy.test.ranodm_init import random_init
@pytest.fixture
@pytest.fixture
| 31.024691 | 87 | 0.712296 | """
This module contains unit tests, for the most important functions of
ruspy.estimation.estimation_cost_parameters. The values to compare the results with
are saved in resources/estimation_test. The setting of the test is documented in the
inputs section in test module.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from ruspy.config import TEST_RESOURCES_DIR
from ruspy.estimation.estimation_transitions import create_transition_matrix
from ruspy.model_code.choice_probabilities import choice_prob_gumbel
from ruspy.model_code.cost_functions import calc_obs_costs
from ruspy.model_code.cost_functions import lin_cost
from ruspy.model_code.fix_point_alg import calc_fixp
from ruspy.test.ranodm_init import random_init
@pytest.fixture
def inputs():
    """Model primitives shared by the estimation tests."""
    return {
        "nstates": 90,
        "cost_fct": lin_cost,
        "params": np.array([10, 2]),
        "trans_prob": np.array([0.2, 0.3, 0.15, 0.35]),
        "disc_fac": 0.9999,
    }
@pytest.fixture
def outputs():
    """Pre-computed reference results loaded from the test resources."""
    resource_dir = TEST_RESOURCES_DIR + "estimation_test/"
    file_map = {
        "costs": "myop_cost.txt",
        "trans_mat": "trans_mat.txt",
        "fixp": "fixp.txt",
        "choice_probs": "choice_prob.txt",
    }
    return {key: np.loadtxt(resource_dir + fname) for key, fname in file_map.items()}
def test_cost_func(inputs, outputs):
    """Observed costs must match the stored reference matrix."""
    observed_costs = calc_obs_costs(
        inputs["nstates"], inputs["cost_fct"], inputs["params"], 0.001
    )
    assert_array_almost_equal(observed_costs, outputs["costs"])
def test_create_trans_mat(inputs, outputs):
    """Transition matrix built from the probabilities must match the reference."""
    trans_mat = create_transition_matrix(inputs["nstates"], inputs["trans_prob"])
    assert_array_almost_equal(trans_mat, outputs["trans_mat"])
def test_fixp(inputs, outputs):
    """Fixed point of the contraction must match the stored reference."""
    fixed_point = calc_fixp(outputs["trans_mat"], outputs["costs"], inputs["disc_fac"])[0]
    assert_array_almost_equal(fixed_point, outputs["fixp"])
def test_choice_probs(inputs, outputs):
    """Gumbel choice probabilities must match the stored reference."""
    probs = choice_prob_gumbel(outputs["fixp"], outputs["costs"], inputs["disc_fac"])
    assert_array_almost_equal(probs, outputs["choice_probs"])
def test_trans_mat_rows_one():
    """Every row of a randomly initialized transition matrix must sum to 1."""
    init_dict = random_init()
    num_states = init_dict["estimation"]["states"]
    trans_mat = create_transition_matrix(
        num_states, np.array(init_dict["simulation"]["known_trans"])
    )
    assert_array_almost_equal(trans_mat.sum(axis=1), np.ones(num_states))
| 1,548 | 0 | 159 |
eb4c17d2f6bc5d50a6467e03c3892b0132914fb1 | 6,123 | py | Python | exercises/adaboost_scenario.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | dani3lwinter/IML.HUJI | 46b5e001b92d7bac3b7efa2278d0236b69159895 | [
"MIT"
] | null | null | null | from itertools import product
import numpy as np
from typing import Tuple
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metalearners import AdaBoost
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
pio.renderers.default = "browser"
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are drawn uniformly from [-1, 1]^2 and labeled -1 inside the
    circle of radius 0.5 centered at the origin, +1 outside. A fraction of
    the labels is then inverted to simulate label noise.

    Parameters
    ----------
    n: int
        Number of samples to generate
    noise_ratio: float
        Ratio of labels to invert; must be in [0, 1]

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples
    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # Fix: sample the noise indices WITHOUT replacement so that exactly
    # int(noise_ratio * n) distinct labels are inverted. The previous call
    # used the default replace=True, so duplicate indices could be drawn and
    # (because fancy-index `*=` applies once per unique index) fewer labels
    # than requested were actually flipped.
    y[np.random.choice(n, int(noise_ratio * n), replace=False)] *= -1
    return X, y
def add_partial_decision_boundary(fig, X, y, t, learner, lims, row=None, col=None):
    """
    Plot the decision boundary of ensemble with t estimators

    Adds three traces to ``fig``: the decision surface of the first ``t``
    weak learners, the class -1 samples (red circles) and the class +1
    samples (blue x markers). With ``row``/``col`` given, traces go into
    that subplot and its annotation title is updated; otherwise the figure
    title is set. The figure is mutated in place and also returned.
    """
    # symbols = np.array(["circle", "x"])[((y + 1) / 2).astype(int)]
    predict = lambda X_: learner.partial_predict(X_, t)
    accuracy = 1 - learner.partial_loss(X, y, t)
    fig.add_trace(decision_surface(predict, lims[0], lims[1], showscale=False),
                  row=row, col=col)
    class0 = y == -1
    fig.add_trace(go.Scatter(x=X[class0][:, 0], y=X[class0][:, 1], mode="markers",
                             name="Class -1", legendgroup='Class -1', showlegend=False,
                             marker=dict(color="red", symbol="circle", line=dict(color="black", width=1))),
                  row=row, col=col)
    class1 = y == 1
    fig.add_trace(go.Scatter(x=X[class1][:, 0], y=X[class1][:, 1], mode="markers",
                             name="Class 1", legendgroup='Class 1', showlegend=False,
                             marker=dict(color="blue", symbol="x", line=dict(color="black", width=1))),
                  row=row, col=col)
    fig.update_xaxes(title_text="x", row=row, col=col)
    fig.update_yaxes(title_text="y", row=row, col=col)
    if row is None:
        fig.update_layout(title_text=f"Decision boundary of ensemble with {t} estimators, Accuracy: {accuracy:.3f}")
    else:
        # Subplot titles live in fig.layout.annotations, row-major on a
        # 2-column grid — hence the 2*(row-1)+col-1 index.
        fig.layout.annotations[2*(row-1)+col-1].update(text=f"Using {t} estimators, Accuracy: {accuracy: .2f}")
    return fig
# Entry point: run the experiment on clean data, then with 40% label noise.
# NOTE(review): fit_and_evaluate_adaboost is not defined in this excerpt —
# confirm it exists in the full module.
if __name__ == '__main__':
    np.random.seed(0)
    fit_and_evaluate_adaboost(0)
    fit_and_evaluate_adaboost(0.4)
| 41.09396 | 117 | 0.594317 | from itertools import product
import numpy as np
from typing import Tuple
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metalearners import AdaBoost
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
pio.renderers.default = "browser"
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are drawn uniformly from [-1, 1]^2 and labeled -1 inside the
    circle of radius 0.5 centered at the origin, +1 outside. A fraction of
    the labels is then inverted to simulate label noise.

    Parameters
    ----------
    n: int
        Number of samples to generate
    noise_ratio: float
        Ratio of labels to invert; must be in [0, 1]

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples
    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # Fix: sample the noise indices WITHOUT replacement so that exactly
    # int(noise_ratio * n) distinct labels are inverted. The previous call
    # used the default replace=True, so duplicate indices could be drawn and
    # (because fancy-index `*=` applies once per unique index) fewer labels
    # than requested were actually flipped.
    y[np.random.choice(n, int(noise_ratio * n), replace=False)] *= -1
    return X, y
def add_partial_decision_boundary(fig, X, y, t, learner, lims, row=None, col=None):
    """
    Plot the decision boundary of ensemble with t estimators

    Adds three traces to ``fig``: the decision surface of the first ``t``
    weak learners, the class -1 samples (red circles) and the class +1
    samples (blue x markers). With ``row``/``col`` given, traces go into
    that subplot and its annotation title is updated; otherwise the figure
    title is set. The figure is mutated in place and also returned.
    """
    # symbols = np.array(["circle", "x"])[((y + 1) / 2).astype(int)]
    predict = lambda X_: learner.partial_predict(X_, t)
    accuracy = 1 - learner.partial_loss(X, y, t)
    fig.add_trace(decision_surface(predict, lims[0], lims[1], showscale=False),
                  row=row, col=col)
    class0 = y == -1
    fig.add_trace(go.Scatter(x=X[class0][:, 0], y=X[class0][:, 1], mode="markers",
                             name="Class -1", legendgroup='Class -1', showlegend=False,
                             marker=dict(color="red", symbol="circle", line=dict(color="black", width=1))),
                  row=row, col=col)
    class1 = y == 1
    fig.add_trace(go.Scatter(x=X[class1][:, 0], y=X[class1][:, 1], mode="markers",
                             name="Class 1", legendgroup='Class 1', showlegend=False,
                             marker=dict(color="blue", symbol="x", line=dict(color="black", width=1))),
                  row=row, col=col)
    fig.update_xaxes(title_text="x", row=row, col=col)
    fig.update_yaxes(title_text="y", row=row, col=col)
    if row is None:
        fig.update_layout(title_text=f"Decision boundary of ensemble with {t} estimators, Accuracy: {accuracy:.3f}")
    else:
        # Subplot titles live in fig.layout.annotations, row-major on a
        # 2-column grid — hence the 2*(row-1)+col-1 index.
        fig.layout.annotations[2*(row-1)+col-1].update(text=f"Using {t} estimators, Accuracy: {accuracy: .2f}")
    return fig
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
    """
    Fit an AdaBoost ensemble of decision stumps on synthetic 2-D data and
    show the four exercise plots: train/test error vs. ensemble size,
    decision surfaces for selected ensemble sizes, the best-performing
    ensemble size, and the final sample weights.

    Parameters
    ----------
    noise : float
        Ratio of train/test labels to invert (label noise).
    n_learners : int
        Maximum number of weak learners to fit.
    train_size, test_size : int
        Number of training / test samples to generate.
    """
    (train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
    # Question 1: Train- and test errors of AdaBoost in noiseless case
    learner = AdaBoost(DecisionStump, n_learners)
    learner.fit(train_X, train_y)
    # Plot the training- and test errors as a function of the number of fitted learners
    num_of_learners = np.arange(1, n_learners + 1)
    train_errors = [learner.partial_loss(train_X, train_y, t) for t in num_of_learners]
    test_errors = [learner.partial_loss(test_X, test_y, t) for t in num_of_learners]
    fig = go.Figure(data=[go.Scatter(x=num_of_learners, y=train_errors, name="Training Error"),
                          go.Scatter(x=num_of_learners, y=test_errors, name="Test Error")],
                    layout=go.Layout(title='Training and test errors as a function of the number of fitted learners',
                                     xaxis_title='Number of learners',
                                     yaxis_title='Error'))
    fig.show()
    # Question 2: Plotting decision surfaces
    T = [[5, 50], [100, 250]]
    # Axis limits: bounding box of all samples, padded by 0.1 on each side.
    lims = np.array([np.r_[train_X, test_X].min(axis=0),
                     np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    fig = make_subplots(rows=2, cols=2, specs=2 * [2 * [{"type": "scatter"}]],
                        subplot_titles=4*["Decision surface"],
                        vertical_spacing=0.15,
                        horizontal_spacing=0.10)
    fig.update_layout(title_text=f"Decision boundary of the ensemble",
                      xaxis_title="x", yaxis_title="y", legend_title_text='Test Set',
                      margin_t=50)
    for row, col in product(range(2), range(2)):
        add_partial_decision_boundary(fig, test_X, test_y, T[row][col], learner, lims, row=row+1, col=col+1)
    # Show the legend only once (first subplot's two scatter traces).
    fig.data[1].showlegend = True
    fig.data[2].showlegend = True
    fig.show()
    # Question 3: Decision surface of best performing ensemble
    best_T = num_of_learners[np.argmin(test_errors)]
    fig = go.Figure(layout=go.Layout(legend_title_text='Test Set'))
    add_partial_decision_boundary(fig, test_X, test_y, best_T, learner, lims)
    fig.data[1].showlegend = True
    fig.data[2].showlegend = True
    fig.show()
    # Question 4: Decision surface with weighted samples
    fig = go.Figure(layout=go.Layout(title=f'Decision boundary of fitted model, with train set',
                                     xaxis=dict(title='x'),
                                     yaxis=dict(title='y')))
    fig.add_trace(decision_surface(learner.predict, lims[0], lims[1], showscale=False))
    max_bubble_size = 50 if noise == 0 else 5
    # Plotly bubble-chart convention: with sizemode='area', this sizeref maps
    # the largest weight in learner.D_ to a marker of max_bubble_size pixels.
    sizeref = 2. * max(learner.D_) / (max_bubble_size ** 2)
    fig.add_trace(go.Scatter(x=train_X[:, 0], y=train_X[:, 1], mode="markers",
                             marker=dict(color=train_y,
                                         colorscale=[custom[0], custom[-1]],
                                         size=learner.D_,
                                         sizemode='area',
                                         sizeref=sizeref,
                                         sizemin=0.5
                                         )
                             ))
    fig.show()
# Entry point: run the experiment on clean data, then with 40% label noise.
if __name__ == '__main__':
    np.random.seed(0)
    fit_and_evaluate_adaboost(0)
    fit_and_evaluate_adaboost(0.4)
| 3,329 | 0 | 23 |
47a3628be6d06fce60c6aa3b96f418edd831bdb8 | 2,375 | py | Python | test.py | yhZhai/wtalc-pytorch | e8016e7849b026132d16f64852711083d735edf2 | [
"MIT"
] | 1 | 2020-05-11T00:28:47.000Z | 2020-05-11T00:28:47.000Z | test.py | yhZhai/wtalc-pytorch | e8016e7849b026132d16f64852711083d735edf2 | [
"MIT"
] | null | null | null | test.py | yhZhai/wtalc-pytorch | e8016e7849b026132d16f64852711083d735edf2 | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.optim as optim
from model import Model
from video_dataset import Dataset
from tensorboard_logger import log_value
import utils
import numpy as np
from torch.autograd import Variable
from classificationMAP import getClassificationMAP as cmAP
from detectionMAP import getDetectionMAP as dmAP
import scipy.io as sio
# torch.set_default_tensor_type('torch.FloatTensor')
| 38.934426 | 116 | 0.664842 | import torch
import torch.nn.functional as F
import torch.optim as optim
from model import Model
from video_dataset import Dataset
from tensorboard_logger import log_value
import utils
import numpy as np
from torch.autograd import Variable
from classificationMAP import getClassificationMAP as cmAP
from detectionMAP import getDetectionMAP as dmAP
import scipy.io as sio
# torch.set_default_tensor_type('torch.FloatTensor')
def test(itr, dataset, args, model, logger, device):
    """Evaluate *model* on the full test split of *dataset*.

    Runs every test video through the model, computes classification mAP and
    detection mAP at several IoU thresholds, prints them, logs them to the
    tensorboard *logger* under iteration *itr*, and appends a summary line to
    the results file.
    """
    done = False
    instance_logits_stack = []
    element_logits_stack = []
    labels_stack = []
    # dataset.load_data advances an internal cursor and returns done=True
    # once every test video has been consumed.
    while not done:
        if dataset.currenttestidx % 100 == 0:
            print('Testing test data point %d of %d' % (dataset.currenttestidx, len(dataset.testidx)))
        features, labels, done = dataset.load_data(is_training=False)
        features = torch.from_numpy(features).float().to(device)
        with torch.no_grad():
            _, element_logits = model(Variable(features), is_training=False)
        # Video-level class scores: softmax over the mean of the top-k
        # per-frame logits, with k = 1/8 of the video length (rounded up).
        tmp = F.softmax(torch.mean(torch.topk(element_logits, k=int(np.ceil(len(features) / 8)), dim=0)[0], dim=0),
                        dim=0).cpu().data.numpy()
        element_logits = element_logits.cpu().data.numpy()
        instance_logits_stack.append(tmp)
        element_logits_stack.append(element_logits)
        labels_stack.append(labels)
    instance_logits_stack = np.array(instance_logits_stack)
    labels_stack = np.array(labels_stack)
    dmap, iou = dmAP(element_logits_stack, dataset.path_to_annotations, args)
    if args.dataset_name == 'Thumos14':
        # Thumos14 contains background-only test videos; zero their labels so
        # they do not contribute positives to classification mAP.
        # NOTE(review): assumes 'test_set_meta.mat' exists in the working
        # directory and its entries align with the dataset order — confirm.
        test_set = sio.loadmat('test_set_meta.mat')['test_videos'][0]
        for i in range(np.shape(labels_stack)[0]):
            if test_set[i]['background_video'] == 'YES':
                labels_stack[i, :] = np.zeros_like(labels_stack[i, :])
    cmap = cmAP(instance_logits_stack, labels_stack)
    print('Classification map %f' % cmap)
    for i_iou, i_map in zip(iou, dmap):
        print('Detection map @ %f = %f' % (i_iou, i_map))
    print('AVG: {:.4f}%'.format(sum(dmap) / len(dmap)))
    logger.log_value('Test Classification mAP', cmap, itr)
    for item in list(zip(dmap, iou)):
        logger.log_value('Test Detection mAP @ IoU = ' + str(item[1]), item[0], itr)
    utils.write_to_file(args.dataset_name, dmap, cmap, itr)
| 1,909 | 0 | 25 |
7fe3b91986972c5ce11f9efef5923c262ae5e073 | 38,655 | py | Python | Steel/bolts_IC_gui.py | hotmailbox/Structural-Engineering | f34dcaec728fbb3e3a05c6f29ed5dabc621550cb | [
"BSD-3-Clause"
] | 152 | 2017-08-14T10:06:19.000Z | 2022-03-07T04:48:49.000Z | Steel/bolts_IC_gui.py | hotmailbox/Structural-Engineering | f34dcaec728fbb3e3a05c6f29ed5dabc621550cb | [
"BSD-3-Clause"
] | 15 | 2017-08-13T23:30:18.000Z | 2021-03-25T05:08:49.000Z | Steel/bolts_IC_gui.py | hotmailbox/Structural-Engineering | f34dcaec728fbb3e3a05c6f29ed5dabc621550cb | [
"BSD-3-Clause"
] | 52 | 2017-11-09T09:58:07.000Z | 2022-02-09T16:58:38.000Z | '''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
import math as m
import Tkinter as tk
import tkMessageBox
import ttk
import tkFont
import tkFileDialog
import bolt_group_istantaneous_center as bolt_ic
# NOTE(review): main() is not defined in this excerpt — confirm it exists in
# the full module before running as a script.
if __name__ == '__main__':
    main()
| 45.31653 | 464 | 0.543837 | '''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
import math as m
import Tkinter as tk
import tkMessageBox
import ttk
import tkFont
import tkFileDialog
import bolt_group_istantaneous_center as bolt_ic
class main_window:
def __init__(self, master):
self.master = master
self.inputs = []
self.bolt_x_gui = []
self.bolt_y_gui = []
self.bolt_gui_elements = []
self.xloc = []
self.yloc = []
self.bolt_count = 0
self.hasrun=0
#self.detailed_results_gui = []
self.aisc_result_labels = []
self.aisc_has_run = 0
# Font Set
self.f_size = 8
self.helv = tkFont.Font(family=' Courier New',size=self.f_size, weight='bold')
self.helv_norm = tkFont.Font(family=' Courier New',size=self.f_size)
self.helv_res = tkFont.Font(family=' Courier New',size=self.f_size, weight='bold', underline = True)
self.mono_f = tkFont.Font(family='Consolas',size=self.f_size)
# Menubar
self.menubar = tk.Menu(self.master)
self.menu = tk.Menu(self.menubar, tearoff=0)
self.menu_props = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label = "File", menu=self.menu)
#self.menu.add_command(label="Save", command=self.save_inputs)
#self.menu.add_command(label="Open", command=self.open_existing)
self.menu.add_separator()
self.menu.add_command(label="Quit", command=self.quit_app)
try:
self.master.config(menu=self.menubar)
except AttributeError:
self.master.tk.call(master, "config", "-menu", self.menubar)
#Main Frame
self.base_frame = tk.Frame(master, bd=2, relief='sunken', padx=1,pady=1)
self.base_frame.pack(side=tk.BOTTOM, padx= 1, pady= 1, fill=tk.X)
#Base Frame Items
w=18
h=1
color='cornflower blue'
self.b_quit = tk.Button(self.base_frame,text="Quit", command=self.quit_app, font=self.helv, width=w, height=h, bg='red3')
self.b_quit.pack(side=tk.RIGHT)
self.graphics_frame = tk.Frame(master, bd=2, relief='sunken', padx=1,pady=1)
self.graphics_frame.pack(side=tk.RIGHT, padx= 1, pady= 1, fill=tk.BOTH, expand=1)
self.data_frame = tk.Frame(master, bd=2, relief='sunken', padx=1,pady=1)
self.data_frame.pack(anchor='c', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
#Main Notebooks
self.nb_data = ttk.Notebook(self.data_frame)
self.nb_data.pack(fill=tk.BOTH, expand=1)
self.nb_graph = ttk.Notebook(self.graphics_frame)
self.nb_graph.pack(fill=tk.BOTH, expand=1)
#Graphics Frame tabs and canvases
#Geometry - Plan
self.graph_tab = ttk.Frame(self.nb_graph)
self.nb_graph.add(self.graph_tab, text='Graph')
self.g_plan_frame = tk.Frame(self.graph_tab, bd=2, relief='sunken', padx=1,pady=1)
self.g_plan_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
self.g_plan_canvas = tk.Canvas(self.g_plan_frame, width=50, height=50, bd=2, relief='sunken', background="black")
self.g_plan_canvas.bind("<Configure>", self.draw_bolts)
self.g_plan_canvas.pack(side = tk.LEFT, anchor='c', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
#Detailed Out - Tab
self.detail_tab = ttk.Frame(self.nb_graph)
self.nb_graph.add(self.detail_tab, text='Detailed Results')
self.detailed_res_frame = tk.Frame(self.detail_tab, bd=2, relief='sunken', padx=1,pady=1)
self.results_text_box = tk.Text(self.detailed_res_frame, bg= "grey90", font= self.mono_f, wrap=tk.WORD)
self.results_text_box.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.res_txt_scroll = tk.Scrollbar(self.detailed_res_frame, command=self.results_text_box.yview)
self.res_txt_scroll.pack(side=tk.LEFT, fill=tk.Y)
self.results_text_box['yscrollcommand'] = self.res_txt_scroll.set
self.detailed_res_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
#Convergance Graph
self.converge_graph_tab = ttk.Frame(self.nb_graph)
self.nb_graph.add(self.converge_graph_tab, text='Convergance Graph')
self.g_converge_frame = tk.Frame(self.converge_graph_tab, bd=2, relief='sunken', padx=1,pady=1)
self.g_converge_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
self.g_converge_canvas = tk.Canvas(self.g_converge_frame, width=50, height=50, bd=2, relief='sunken', background="black")
self.g_converge_canvas.bind("<Configure>", self.draw_converge)
self.g_converge_canvas.pack(side = tk.LEFT, anchor='c', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
#C stability Graph
self.c_stab_graph_tab = ttk.Frame(self.nb_graph)
self.nb_graph.add(self.c_stab_graph_tab, text='C Stability Graph')
self.g_c_stab_frame = tk.Frame(self.c_stab_graph_tab, bd=2, relief='sunken', padx=1,pady=1)
self.g_c_stab_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
self.g_c_stab_canvas = tk.Canvas(self.g_c_stab_frame, width=50, height=50, bd=2, relief='sunken', background="black")
self.g_c_stab_canvas.bind("<Configure>", self.draw_c_stability)
self.g_c_stab_canvas.pack(side = tk.LEFT, anchor='c', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
#Data/calc Frame tabs
#Load location Angle and add bolts
self.basic_input = ttk.Frame(self.nb_data)
self.nb_data.add(self.basic_input, text='Geometry Input')
self.data_frame = tk.Frame(self.basic_input, bd=2, relief='sunken', padx=1,pady=1)
# Load - x
tk.Label(self.data_frame, text="load x: (in):", font=self.helv).grid(row=0, column=0, sticky=tk.E)
self.load_x_gui = tk.StringVar()
self.inputs.append(self.load_x_gui)
self.load_x_gui.set('5.0')
self.load_x_entry = tk.Entry(self.data_frame, textvariable=self.load_x_gui, width=10)
self.load_x_entry.grid(row=0, column=1)
# Load - y
tk.Label(self.data_frame, text="load y: (in):", font=self.helv).grid(row=1, column=0, sticky=tk.E)
self.load_y_gui = tk.StringVar()
self.inputs.append(self.load_y_gui)
self.load_y_gui.set('5.0')
self.load_y_entry = tk.Entry(self.data_frame, textvariable=self.load_y_gui, width=10)
self.load_y_entry.grid(row=1, column=1)
# Load - angle
tk.Label(self.data_frame, text="load angle: (degrees):", font=self.helv).grid(row=2, column=0, sticky=tk.E)
self.load_angle_gui = tk.StringVar()
self.inputs.append(self.load_angle_gui)
self.load_angle_gui.set('5.0')
self.load_angle_entry = tk.Entry(self.data_frame, textvariable=self.load_angle_gui, width=10)
self.load_angle_entry.grid(row=2, column=1)
tk.Label(self.data_frame, text="Bolts :", font=self.helv).grid(row=3, column=0, sticky=tk.W)
#Start X
tk.Label(self.data_frame, text="x (in) :", font=self.helv).grid(row=4, column=0, sticky=tk.E)
self.bolt_x_in = tk.StringVar()
self.bolt_x_in.set('0.0')
self.bolt_x_entry = tk.Entry(self.data_frame, textvariable=self.bolt_x_in, width=10)
self.bolt_x_entry.grid(row=4, column=1)
#Start Y
tk.Label(self.data_frame, text="y (in) :", font=self.helv).grid(row=5, column=0, sticky=tk.E)
self.bolt_y_in = tk.StringVar()
self.bolt_y_in.set('0.0')
self.bolt_y_entry= tk.Entry(self.data_frame, textvariable=self.bolt_y_in, width=10)
self.bolt_y_entry.grid(row=5, column=1)
# Button to Add Segment
self.b_add_bolt = tk.Button(self.data_frame,text="Add Bolt", command=self.add_bolt, font=self.helv, width=15, height=h, bg=color)
self.b_add_bolt.grid(row=6, column=0)
# Button to Romove Segment
self.b_remove_bolt = tk.Button(self.data_frame,text="Remove Last Bolt", command=self.remove_bolt, font=self.helv, width=15, height=h, bg=color)
self.b_remove_bolt.grid(row=6, column=1)
self.bolt_frame = tk.Frame(self.data_frame)
self.bolt_input_canvas = tk.Canvas(self.bolt_frame, background="gray", width=50, height=200)
self.bolt_canvas_frame = tk.Frame(self.bolt_input_canvas)
self.scrollforcanvas = tk.Scrollbar(self.bolt_frame, orient="vertical", command=self.bolt_input_canvas.yview)
self.scrollforcanvas.pack(side=tk.RIGHT, fill="y")
self.bolt_input_canvas.pack(side=tk.LEFT, fill="both", expand=True)
self.bolt_input_canvas.create_window(0,0, window=self.bolt_canvas_frame, anchor="nw", tags="self.bolt_canvas_frame")
self.bolt_input_canvas.configure(yscrollcommand=self.scrollforcanvas.set)
self.bolt_frame.grid(row=7, column=0, columnspan=3, sticky=tk.NSEW)
self.bolt_canvas_frame.bind("<Configure>", self.onFrameConfigure)
# Button run
self.b_run = tk.Button(self.data_frame,text="Run", command=self.run, font=self.helv, width=15, height=h, bg=color)
self.b_run.grid(row=8, column=0)
self.ic_x_gui = tk.StringVar()
self.ic_x_gui.set("--")
tk.Label(self.data_frame, text="IC x: (in)", font=self.helv).grid(row=9, column=0, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.ic_x_gui, width=10).grid(row=9, column=1)
self.ic_y_gui = tk.StringVar()
self.ic_y_gui.set("--")
tk.Label(self.data_frame, text="IC y: (in)", font=self.helv).grid(row=10, column=0, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.ic_y_gui, width=10).grid(row=10, column=1)
self.cu_gui = tk.StringVar()
self.cu_gui.set("--")
tk.Label(self.data_frame, text="Cu: ", font=self.helv).grid(row=11, column=0, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.cu_gui, width=10).grid(row=11, column=1)
self.solution_gui = tk.StringVar()
self.solution_gui.set("--")
tk.Label(self.data_frame, text="Solution Useable: ", font=self.helv).grid(row=12, column=0, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.solution_gui, width=10).grid(row=12, column=1)
self.cu_maybe_gui = tk.StringVar()
self.cu_maybe_gui.set("--")
tk.Label(self.data_frame, text="Predicted Cu: ", font=self.helv).grid(row=11, column=3, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.cu_maybe_gui, width=10).grid(row=11, column=4)
self.tol_overide_gui = tk.StringVar()
self.tol_overide_gui.set("--")
tk.Label(self.data_frame, text="Tolerance Overide: \nDefualt: 1E-6\n-- = no overide", font=self.helv).grid(row=9, column=3, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.tol_overide_gui, width=10).grid(row=9, column=4)
self.tol_achieved_gui = tk.StringVar()
self.tol_achieved_gui.set("--")
tk.Label(self.data_frame, text="Tolerance reached:", font=self.helv).grid(row=10, column=3, sticky=tk.E)
tk.Entry(self.data_frame, textvariable=self.tol_achieved_gui, width=10).grid(row=10, column=4)
self.data_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
#AISC Table Verification
self.aisc_verify_input = ttk.Frame(self.nb_data)
self.nb_data.add(self.aisc_verify_input, text='AISC Table 7.7-7.14 Verification')
self.aisc_verify_frame = tk.Frame(self.aisc_verify_input, bd=2, relief='sunken', padx=1,pady=1)
# To match AISC table need to know:
# Number of Columns of Bolts
# Number of Rows of Bolts
# Spacing of Columns, in.
# Spacing of Rows, in.
# Load Angle from Vertical, degrees
# x eccentricity from bolt group centroid to load
# y eccentricity = 0
self.aisc_ex = [2,3,4,5,6,7,8,9,10,12,14,16,18,20,24,28,32,36]
self.aisc_numCols = tk.StringVar()
self.aisc_numCols.set("1")
tk.Label(self.aisc_verify_frame, text="Number of Columns:", font=self.helv).grid(row=0, column=0, sticky=tk.E)
tk.Entry(self.aisc_verify_frame,textvariable=self.aisc_numCols, width=10).grid(row=0, column=1)
self.aisc_numRows = tk.StringVar()
self.aisc_numRows.set("2")
tk.Label(self.aisc_verify_frame, text="Number of Rows:", font=self.helv).grid(row=0, column=2, sticky=tk.E)
tk.Entry(self.aisc_verify_frame,textvariable=self.aisc_numRows, width=10).grid(row=0, column=3)
self.aisc_colspacing = tk.StringVar()
self.aisc_colspacing.set("2")
tk.Label(self.aisc_verify_frame, text="Column Spacing (in):", font=self.helv).grid(row=1, column=0, sticky=tk.E)
tk.Entry(self.aisc_verify_frame,textvariable=self.aisc_colspacing, width=10).grid(row=1, column=1)
self.aisc_rowspacing = tk.StringVar()
self.aisc_rowspacing.set("3")
tk.Label(self.aisc_verify_frame, text="Row Spacing (in):", font=self.helv).grid(row=1, column=2, sticky=tk.E)
tk.Entry(self.aisc_verify_frame,textvariable=self.aisc_rowspacing, width=10).grid(row=1, column=3)
self.aisc_loadangle = tk.StringVar()
self.aisc_loadangle.set("0")
tk.Label(self.aisc_verify_frame, text="Load Angle from Vertical (degrees):", font=self.helv).grid(row=2, column=0, columnspan=2, sticky=tk.E)
tk.Entry(self.aisc_verify_frame,textvariable=self.aisc_loadangle, width=10).grid(row=2, column=2)
tk.Label(self.aisc_verify_frame, text="ex (in):", font=self.helv).grid(row=3, column=0, sticky=tk.E)
i=4
for ex in self.aisc_ex:
tk.Label(self.aisc_verify_frame, text='{0}'.format(ex), font=self.helv).grid(row=i, column=0, sticky=tk.E)
i+=1
# Button run AISC check
self.b_run_aisc = tk.Button(self.aisc_verify_frame,text="Calc AISC Table", command=self.run_aisc, font=self.helv, width=15, height=h, bg=color)
self.b_run_aisc.grid(row=i+1, column=0)
self.aisc_verify_frame.pack(fill=tk.BOTH,expand=1, padx=5, pady=5)
# Call function to display license dialog on app start
self.license_display()
    def license_display(self, *event):
        """Show the BSD license text in a modal error dialog at app start.

        Called once at the end of __init__; *event is accepted so the method
        can also be bound to a Tk event without error.
        """
        # Function to display license dialog on app start
        license_string = ("Copyright (c) 2019, Donald N. Bockoven III\n"
                        "All rights reserved.\n\n"
                        "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\""
                        " AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE"
                        " IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE"
                        " DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE"
                        " FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL"
                        " DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR"
                        " SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER"
                        " CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,"
                        " OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE"
                        " OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
                        "https://github.com/buddyd16/Structural-Engineering/blob/master/LICENSE"
                        )
        tkMessageBox.showerror("License Information",license_string)
        # Bring the main window back to the foreground after the dialog closes.
        self.master.focus_force()
    def quit_app(self):
        """Tear down the Tk window and stop the mainloop (app exit)."""
        self.master.destroy()
        # quit() after destroy() ensures mainloop() actually returns.
        self.master.quit()
def add_bolt(self, *event):
self.hasrun=0
self.bolt_count +=1
for element in self.bolt_gui_elements:
element.destroy()
del self.bolt_gui_elements[:]
self.bolt_x_gui.append(tk.StringVar())
self.bolt_y_gui.append(tk.StringVar())
x = self.bolt_x_in.get()
y = self.bolt_y_in.get()
self.bolt_x_gui[-1].set(x)
self.bolt_y_gui[-1].set(y)
for i in range(self.bolt_count):
c = tk.Label(self.bolt_canvas_frame, text="Bolt {0}".format(i), font=self.helv)
c.grid(row=i, column=0, sticky=tk.W)
a = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_x_gui[i], width=10)
a.grid(row=i, column=1)
b = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_y_gui[i], width=10)
b.grid(row=i, column=2)
self.bolt_gui_elements.append(c)
self.bolt_gui_elements.append(a)
self.bolt_gui_elements.append(b)
self.draw_bolts()
self.onFrameConfigure()
def remove_bolt(self, *event):
self.hasrun=0
if self.bolt_count == 0:
pass
else:
self.bolt_count -=1
for element in self.bolt_gui_elements:
element.destroy()
del self.bolt_x_gui[-1]
del self.bolt_y_gui[-1]
for i in range(self.bolt_count):
c = tk.Label(self.bolt_canvas_frame, text="Bolt {0}".format(i), font=self.helv)
c.grid(row=i, column=0, sticky=tk.W)
a = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_x_gui[i], width=10)
a.grid(row=i, column=1)
b = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_y_gui[i], width=10)
b.grid(row=i, column=2)
self.bolt_gui_elements.append(c)
self.bolt_gui_elements.append(a)
self.bolt_gui_elements.append(b)
self.draw_bolts()
self.onFrameConfigure()
    def onFrameConfigure(self, *event):
        '''Reset the scroll region to encompass the inner frame.

        Bound to <Configure> on the bolt list frame and also called after
        every rebuild of the bolt rows, so the scrollbar range tracks the
        current content height.'''
        self.bolt_input_canvas.configure(scrollregion=self.bolt_input_canvas.bbox("all"))
    def run(self, *event):
        """Run the bolt-group instantaneous-center analysis and refresh the UI.

        Reads the load location/angle and all bolt coordinates from the GUI,
        calls bolt_ic.brandt(), stores the results on self, and redraws every
        results view. Silently does nothing with fewer than 2 bolts.
        """
        if self.bolt_count < 2:
            pass
        else:
            xloc = []
            yloc = []
            p_xloc = float(self.load_x_gui.get())
            p_yloc = float(self.load_y_gui.get())
            p_angle = float(self.load_angle_gui.get())
            for x in self.bolt_x_gui:
                xloc.append(float(x.get()))
            for y in self.bolt_y_gui:
                yloc.append(float(y.get()))
            # "--" in the override field means "use the default tolerance".
            tol = self.tol_overide_gui.get()
            if tol == "--":
                tol= 0.000001
            else:
                tol=float(tol)
            # brandt() returns (detailed_out, IC, Cu) -- see the indexing below.
            res = bolt_ic.brandt(xloc,yloc,p_xloc,p_yloc,p_angle,tol)
            self.IC = res[1]
            self.Cu = res[2]
            self.detailed_out = res[0]
            self.ic_x_gui.set("{0:.3f}".format(self.IC[0]))
            self.ic_y_gui.set("{0:.3f}".format(self.IC[1]))
            self.cu_gui.set("{0:.3f}".format(self.Cu))
            # detailed_out indices below follow bolt_ic.brandt's result layout
            # (12: solution-usable flag, 15: predicted Cu, 17: convergence data).
            self.solution_gui.set(self.detailed_out[12][1])
            self.cu_maybe_gui.set("{0:.3f}".format(self.detailed_out[15][1]))
            self.tol_achieved_gui.set("{:.3E}".format(min(self.detailed_out[17][0])))
            self.hasrun=1
            self.draw_bolts()
            self.fill_details()
            self.draw_converge()
            self.draw_c_stability()
    def draw_bolts(self,*event):
        """Redraw the plan-view canvas: axes legend, load arrow, bolts, the
        instantaneous center (red), and the bolt-group centroid (green cross).

        Coordinates are scaled to fit the canvas with an 80 px margin; the
        canvas y axis is flipped (h - ...) so model +y points up on screen.
        """
        self.g_plan_canvas.delete("all")
        w = self.g_plan_canvas.winfo_width()
        h = self.g_plan_canvas.winfo_height()
        # x y arrows
        coord_start = 10
        self.g_plan_canvas.create_line(coord_start,h-coord_start,coord_start+50,h-coord_start, fill='green', width=1, arrow=tk.LAST)
        self.g_plan_canvas.create_text(coord_start+50,h-(coord_start+8), text='x', fill='green')
        self.g_plan_canvas.create_line(coord_start,h-coord_start,coord_start,h-(coord_start+50), fill='green', width=1, arrow=tk.LAST)
        self.g_plan_canvas.create_text(coord_start+8,h-(coord_start+50), text='y', fill='green')
        # Load angle
        self.g_plan_canvas.create_line(coord_start+70,h-coord_start,coord_start+125,h-(coord_start+50), fill='green', width=1, arrow=tk.FIRST)
        self.g_plan_canvas.create_line(coord_start+70,h-coord_start,coord_start+125,h-coord_start, fill='green', width=1)
        self.g_plan_canvas.create_text(coord_start+100,h-(coord_start+10), text='angle', fill='green')
        if self.bolt_count < 2:
            pass
        else:
            xloc = []
            yloc = []
            p_xloc = float(self.load_x_gui.get())
            p_yloc = float(self.load_y_gui.get())
            p_angle = float(self.load_angle_gui.get())
            # Second point 3 units along the load direction, used to draw the arrow.
            px_2 = (m.cos(m.radians(p_angle))*3)+ p_xloc
            py_2 = (m.sin(m.radians(p_angle))*3) + p_yloc
            for x in self.bolt_x_gui:
                xloc.append(float(x.get()))
            for y in self.bolt_y_gui:
                yloc.append(float(y.get()))
            # Before an analysis has run, stand in the last bolt for the IC so
            # the extents/scaling math still works.
            if self.hasrun == 1:
                ic_x = self.IC[0]
                ic_y = self.IC[1]
            else:
                ic_x = xloc[-1]
                ic_y = yloc[-1]
            # Drawing extents over bolts, load arrow, and IC.
            min_x = min(min(xloc),p_xloc,px_2,ic_x)
            min_y = min(min(yloc),p_yloc,py_2,ic_y)
            max_x = max(max(xloc),p_xloc,px_2,ic_x) - min_x
            max_y = max(max(yloc),p_yloc,py_2,ic_y) - min_y
            max_dim_for_scale = max(max_x,max_y)
            initial = 80
            # Uniform scale from the governing dimension; degenerate (zero
            # extent) axes just take the full drawable span as the factor.
            if max_x == 0:
                sf_x = (w - (2*initial))
            else:
                sf_x = (w - (2*initial)) / max_dim_for_scale
            if max_y == 0:
                sf_y = (h - (2*initial))
            else:
                sf_y = (h - (2*initial)) / max_dim_for_scale
            #Load Line
            x0 = ((p_xloc - min_x)*sf_x) + initial
            y0 = h - (((p_yloc - min_y)*sf_y) + initial)
            x1 = ((px_2 - min_x)*sf_x) + initial
            y1 = h - (((py_2 - min_y)*sf_y) + initial)
            self.g_plan_canvas.create_line(x0,y0,x1,y1, fill='blue', width=1, arrow=tk.FIRST)
            #Bolts
            for x,y in zip(xloc,yloc):
                x0 = (((x - min_x) * sf_x) + initial)-5
                y0 = h-(((y - min_y)*sf_y)+initial)+5
                x1 = (((x - min_x) * sf_x) + initial)+5
                y1 = h-(((y - min_y)*sf_y)+initial)-5
                self.g_plan_canvas.create_oval(x0,y0,x1,y1, fill='green', width=1)
            #IC
            x0 = (((ic_x - min_x) * sf_x) + initial)-5
            y0 = h-(((ic_y - min_y)*sf_y)+initial)+5
            x1 = (((ic_x - min_x) * sf_x) + initial)+5
            y1 = h-(((ic_y - min_y)*sf_y)+initial)-5
            self.g_plan_canvas.create_oval(x0,y0,x1,y1, fill='red', width=1)
            #CG
            if self.hasrun == 0:
                cg = [0,0]
            else:
                cg = self.detailed_out[1][1]
            # Centroid drawn as a small cross (horizontal then vertical tick).
            x0 = (((cg[0] - min_x) * sf_x) + initial)-5
            y0 = h-(((cg[1] - min_y)*sf_y)+initial)
            x1 = (((cg[0] - min_x) * sf_x) + initial)+5
            y1 = h-(((cg[1] - min_y)*sf_y)+initial)
            self.g_plan_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            x0 = (((cg[0] - min_x) * sf_x) + initial)
            y0 = h-(((cg[1] - min_y)*sf_y)+initial)-5
            x1 = (((cg[0] - min_x) * sf_x) + initial)
            y1 = h-(((cg[1] - min_y)*sf_y)+initial)+5
            self.g_plan_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
    def fill_details(self,*event):
        """Rebuild the detailed results text box from self.detailed_out.

        Emits the bolt-group summary (centroid, J, unit forces), the load
        eccentricities, solver status, and a fixed-width per-bolt table.
        No-op until run() has populated self.detailed_out.
        """
        self.results_text_box.delete(1.0,tk.END)
        if self.hasrun == 0:
            pass
        else:
            string = "Number of Bolts: {0}".format(self.detailed_out[0])
            cg = self.detailed_out[1][1]
            string = string + "\nBolt Group Centroid: ({0:.3f},{1:.3f})".format(cg[0],cg[1])
            string = string + "\nBolt Group J: {0:.3f}".format(self.detailed_out[2][1])
            string = string + "\n\nUnit Forces:"
            string = string + "\nPx,unit: {0:.3f}\nPy,unit: {1:.3f}\nMo = {2:.3f}".format(self.detailed_out[3][1],self.detailed_out[3][2], self.detailed_out[4][1])
            p_xloc = float(self.load_x_gui.get())
            p_yloc = float(self.load_y_gui.get())
            p_angle = float(self.load_angle_gui.get())
            ex = abs(self.detailed_out[1][1][0] - p_xloc)
            ey = abs(self.detailed_out[1][1][1] - p_yloc)
            px_2 = (m.cos(m.radians(p_angle))*3)+ p_xloc
            py_2 = (m.sin(m.radians(p_angle))*3) + p_yloc
            # Perpendicular distance from the centroid to the load's line of
            # action (point-to-line distance through (p_xloc,p_yloc)-(px_2,py_2)).
            e = abs(((py_2 - p_yloc)*cg[0]) - ((px_2-p_xloc)*cg[1]) + (px_2*p_yloc) - (py_2*p_xloc)) / m.sqrt(((py_2-p_yloc)*(py_2-p_yloc)) + ((px_2 - p_xloc)*(px_2 - p_xloc)))
            string = string + "\n\nLoad Location: ({0:.3f},{1:.3f})\nLoad Angle:{2:.3f}\nex = {3:.3f}\ney = {4:.3f}\ne = {5:.3f}".format(p_xloc,p_yloc,p_angle,ex,ey,e)
            string = string + "\n\n{0} {1}\n{2} {3}\n".format(self.detailed_out[12][0],self.detailed_out[12][1],self.detailed_out[12][2],self.detailed_out[12][3])
            string = string + "\nSum Rx: {0}\nSum Ry: {1}\nSum Mi: {2}\n\nFxx = Px-Rx = {3}\nFyy = Py-Ry = {4}\nF = {5}\nMp = {8}\n\nFprev = {6}\nCuprev = {7}\nax = {9}\nay = {10}".format(self.detailed_out[13][1],self.detailed_out[13][3],self.detailed_out[13][5],self.detailed_out[10][1],self.detailed_out[10][3],self.detailed_out[10][5],self.detailed_out[16][0],self.detailed_out[16][2],self.detailed_out[14][3],self.detailed_out[18][0],self.detailed_out[18][1])
            # Header row, then one row per bolt; each column of
            # detailed_out[13][7] holds a per-bolt result series.
            string = string + "\n\n|{0:.^11}|{1:.^11}|{2:.^11}|{3:.^11}|{4:.^11}|{5:.^11}|{6:.^11}|{7:.^11}|{8:.^11}|\n".format("Bolt","x to IC","y to IC","di","deltai","R/Rult","Mi","Fxi","Fyi")
            for i in range(self.detailed_out[0]):
                string = string + "|{0:_^11}".format(i+1)
                for res in self.detailed_out[13][7]:
                    string = string + "|{0:_^ 11.3f}".format(res[1][i])
                string = string + "|\n"
            self.results_text_box.insert(tk.END, string)
    def draw_converge(self, *events):
        """Plot the solver's convergence history (detailed_out[17][0]) on the
        convergence canvas: values are normalized by their max and drawn as a
        line chart, blue where decreasing and red where increasing.

        No-op until run() has populated self.detailed_out.
        NOTE(review): normalization divides by max(vals) -- assumes the
        convergence values are never all zero.
        """
        self.g_converge_canvas.delete("all")
        w = self.g_converge_canvas.winfo_width()
        h = self.g_converge_canvas.winfo_height()
        if self.hasrun == 0:
            pass
        else:
            vals = self.detailed_out[17][0]
            norm_vals = [float(i)/max(vals) for i in vals]
            count = len(vals)
            max_dim_for_scale = count
            initial = 80
            sf_x = (w - (2*initial)) / max_dim_for_scale
            #x - axis:
            x0 = initial
            y0 = h - initial
            x1 = w - initial
            y1 = h - initial
            self.g_converge_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            #y - axis
            x0 = initial
            y0 = h - initial
            x1 = initial
            y1 = initial
            self.g_converge_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            #max val label + line
            x0 = initial
            y0 = initial
            x1 = x0 - 5
            y1 = initial
            self.g_converge_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            # Small values switch to scientific notation for readability.
            if max(vals)<0.01:
                string = "{:.3E}".format(max(vals))
            else:
                string = '{0:.3f}'.format(max(vals))
            self.g_converge_canvas.create_text(x1-35,initial, text=string, fill='green')
            #min val label + line
            x0 = initial
            y0 = (h-initial) - (min(norm_vals) * (h - (2*initial)))
            x1 = x0 - 5
            y1 = (h-initial) - (min(norm_vals) * (h - (2*initial)))
            self.g_converge_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            if min(vals)<0.01:
                string = "{:.3E}".format(min(vals))
            else:
                string = '{0:.3f}'.format(min(vals))
            self.g_converge_canvas.create_text(x1-35,y0, text=string, fill='green')
            # Tick marks along the x axis, one per iteration.
            x = 0
            for i in range(len(norm_vals)):
                x0 = (((x) * sf_x) + initial)
                y0 = h - initial
                x1 = x0
                y1 = h - (initial-5)
                self.g_converge_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
                x+=1
            # Connect successive normalized values; color encodes the trend.
            x = 0
            for y in range(len(norm_vals)):
                if y+1 > len(norm_vals)-1:
                    pass
                else:
                    x0 = (((x) * sf_x) + initial)
                    y0 = (h-initial) - (norm_vals[y] * (h - (2*initial)))
                    x1 = (((x+1) * sf_x) + initial)
                    y1 = (h-initial) - (norm_vals[y+1] * (h - (2*initial)))
                    if y0<=y1:
                        color = "blue"
                    else:
                        color = "red"
                    self.g_converge_canvas.create_line(x0,y0,x1,y1, fill=color, width=1)
                    x+=1
    def draw_c_stability(self, *events):
        """Plot the C-coefficient stability history (detailed_out[17][1]) as a
        min-max normalized line chart, blue where decreasing and red where
        increasing. No-op until run() has populated self.detailed_out.

        NOTE(review): when all values are equal the fallback divides by
        max(vals) -- this raises ZeroDivisionError if they are all 0; confirm
        bolt_ic.brandt never returns an all-zero series here.
        """
        self.g_c_stab_canvas.delete("all")
        w = self.g_c_stab_canvas.winfo_width()
        h = self.g_c_stab_canvas.winfo_height()
        if self.hasrun == 0:
            pass
        else:
            vals = self.detailed_out[17][1]
            # Min-max normalize; degenerate (constant) series fall back to
            # dividing by the max so every point maps to 1.0.
            if max(vals)-min(vals) == 0:
                norm_vals = [(float(i))/(max(vals)) for i in vals]
            else:
                norm_vals = [(float(i)-min(vals))/(max(vals)-min(vals)) for i in vals]
            count = len(vals)
            max_dim_for_scale = count
            initial = 80
            sf_x = (w - (2*initial)) / max_dim_for_scale
            sf_y = (h - (2*initial))
            #x - axis:
            x0 = initial
            y0 = h - initial
            x1 = w - initial
            y1 = h - initial
            self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            #y - axis
            x0 = initial
            y0 = h - initial
            x1 = initial
            y1 = initial
            self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            #max val label + line
            x0 = initial
            y0 = initial
            x1 = x0 - 5
            y1 = initial
            self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            self.g_c_stab_canvas.create_text(x1-35,initial, text='{0:.4f}'.format(max(vals)), fill='green')
            #min val label + line
            x0 = initial
            y0 = (h-initial) - (min(norm_vals) * sf_y)
            x1 = x0 - 5
            y1 = (h-initial) - (min(norm_vals) * sf_y)
            self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
            self.g_c_stab_canvas.create_text(x1-35,y0, text='{0:.4f}'.format(min(vals)), fill='green')
            # Tick marks along the x axis, one per iteration.
            x = 0
            for i in range(len(norm_vals)):
                x0 = (((x) * sf_x) + initial)
                y0 = h - initial
                x1 = x0
                y1 = h - (initial-5)
                self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill='green', width=1)
                x+=1
            # Connect successive normalized values; color encodes the trend.
            x = 0
            for y in range(len(norm_vals)):
                if y+1 > len(norm_vals)-1:
                    pass
                else:
                    x0 = (((x) * sf_x) + initial)
                    y0 = (h-initial) - (norm_vals[y] * sf_y)
                    x1 = (((x+1) * sf_x) + initial)
                    y1 = (h-initial) - (norm_vals[y+1] * sf_y)
                    if y0<=y1:
                        color = "blue"
                    else:
                        color = "red"
                    self.g_c_stab_canvas.create_line(x0,y0,x1,y1, fill=color, width=1)
                    x+=1
    def run_aisc(self, *events):
        """Compute the C coefficient for each AISC Table 7-7..7-14 eccentricity.

        Builds a rectangular bolt group from the rows/columns/spacing inputs,
        runs bolt_ic.brandt() once per ex in self.aisc_ex (load applied at the
        group centroid + ex), and shows one result label per row. Finally
        pushes the generated bolt geometry into the main input tab.
        """
        self.aisc_has_run = 0
        # Clear result labels from any previous run.
        for element in self.aisc_result_labels:
            element.destroy()
        del self.aisc_result_labels[:]
        cols = int(self.aisc_numCols.get())
        rows = int(self.aisc_numRows.get())
        colspacing = float(self.aisc_colspacing.get())
        rowspacing = float(self.aisc_rowspacing.get())
        angle_input = float(self.aisc_loadangle.get())
        # AISC tables measure the angle from vertical; brandt() expects it
        # from horizontal.
        angle_use = 90 - angle_input
        if cols == 0 or rows == 0:
            pass
        else:
            x,y = bolt_ic.build_bolt_group(cols, rows, colspacing, rowspacing)
            cg = bolt_ic.bolt_group_center(x,y)
            # Result labels start at grid row 4, matching the ex labels
            # created in __init__.
            i=4
            for ex in self.aisc_ex:
                p_xloc = cg[0]+ex
                p_yloc = cg[1]
                p_angle = angle_use
                tol = 0.00001
                res = bolt_ic.brandt(x,y,p_xloc,p_yloc,p_angle,tol)
                c_string = '{0:.2f}'.format(res[2])
                label = tk.Label(self.aisc_verify_frame, text=c_string, font=self.helv)
                label.grid(row=i, column=1)
                self.aisc_result_labels.append(label)
                i+=1
            self.aisc_has_run = 1
            self.send_aisc_geometry(x,y)
    def send_aisc_geometry(self,xloc,yloc, *events):
        """Replace the main-tab bolt list with the AISC-generated geometry.

        xloc/yloc are parallel coordinate lists from run_aisc(). Rebuilds the
        per-bolt StringVars and widget rows, then redraws the plan view.
        No-op unless run_aisc() completed successfully.
        """
        if self.aisc_has_run == 0:
            pass
        else:
            self.hasrun=0
            # Drop all existing bolt widgets and coordinate variables.
            for element in self.bolt_gui_elements:
                element.destroy()
            del self.bolt_gui_elements[:]
            del self.bolt_x_gui[:]
            del self.bolt_y_gui[:]
            self.bolt_count = len(xloc)
            for i in range(len(xloc)):
                self.bolt_x_gui.append(tk.StringVar())
                self.bolt_y_gui.append(tk.StringVar())
                x = xloc[i]
                y = yloc[i]
                self.bolt_x_gui[-1].set(x)
                self.bolt_y_gui[-1].set(y)
            # Re-render one labelled (label, x-entry, y-entry) row per bolt.
            for i in range(self.bolt_count):
                c = tk.Label(self.bolt_canvas_frame, text="Bolt {0}".format(i), font=self.helv)
                c.grid(row=i, column=0, sticky=tk.W)
                a = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_x_gui[i], width=10)
                a.grid(row=i, column=1)
                b = tk.Entry(self.bolt_canvas_frame, textvariable=self.bolt_y_gui[i], width=10)
                b.grid(row=i, column=2)
                self.bolt_gui_elements.append(c)
                self.bolt_gui_elements.append(a)
                self.bolt_gui_elements.append(b)
            self.draw_bolts()
def main():
    """Create the Tk root window, attach the application UI, and run the loop."""
    root = tk.Tk()
    root.title("Bolt Group Coefficient - Alpha")
    root.minsize(1150, 600)
    main_window(root)
    root.mainloop()

if __name__ == '__main__':
    main()
| 36,189 | 624 | 57 |
04a9f8fe3911032dc6684bf8e345fde7a20c24c1 | 3,355 | py | Python | python/surf/devices/silabs/_Si5345Lite.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-05-13T19:56:51.000Z | 2021-05-21T13:33:02.000Z | python/surf/devices/silabs/_Si5345Lite.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | null | null | null | python/surf/devices/silabs/_Si5345Lite.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | null | null | null | #-----------------------------------------------------------------------------
# This file is part of 'SLAC Firmware Standard Library'.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of 'SLAC Firmware Standard Library', including this file,
# may be copied, modified, propagated, or distributed except according to
# the terms contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.devices.silabs as silabs
import csv
import click
import fnmatch
| 37.277778 | 96 | 0.491803 | #-----------------------------------------------------------------------------
# This file is part of 'SLAC Firmware Standard Library'.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of 'SLAC Firmware Standard Library', including this file,
# may be copied, modified, propagated, or distributed except according to
# the terms contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.devices.silabs as silabs
import csv
import click
import fnmatch
class Si5345Lite(pr.Device):
    """Lightweight pyrogue device for the Si5345 clock chip.

    Exposes a LoadCsvFile command that writes a ClockBuilder Pro (CBPro)
    register export into the device, plus a Page0 sub-device and a derived
    'Locked' status variable.
    """
    def __init__(self,
            simpleDisplay = True,
            advanceUser = False,
            **kwargs):
        # NOTE(review): advanceUser is accepted but not used in this class.
        super().__init__(size=(0x1000<<2), **kwargs)
        self.add(pr.LocalVariable(
            name = "CsvFilePath",
            description = "Used if command's argument is empty",
            mode = "RW",
            value = "",
        ))
        ##############################
        # Commands
        ##############################
        @self.command(value='',description="Load the .CSV from CBPro.",)
        def LoadCsvFile(arg):
            # Check if non-empty argument
            if (arg != ""):
                path = arg
            else:
                # Use the variable path instead
                path = self.CsvFilePath.get()
            # Check for .csv file
            if fnmatch.fnmatch(path, '*.csv'):
                click.secho( f'{self.path}.LoadCsvFile(): {path}', fg='green')
            else:
                click.secho( f'{self.path}.LoadCsvFile(): {path} is not .csv', fg='red')
                return
            # Power down during the configuration load
            self.Page0.PDN.set(True)
            # Open the .CSV file
            with open(path) as csvfile:
                reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)
                # Loop through the rows in the CSV file
                for row in reader:
                    # Skip the CBPro header row; register addresses are hex
                    # strings, shifted <<2 to byte-address 32-bit registers.
                    if (row[0]!='Address'):
                        self._rawWrite(
                            offset = (int(row[0],16)<<2),
                            data = int(row[1],16),
                        )
            # Update local RemoteVariables and verify configuration
            self.readBlocks(recurse=True)
            self.checkBlocks(recurse=True)
            # Execute the Page5.BW_UPDATE_PLL command
            self._rawWrite((0x500<<2)|(0x14 << 2),0x1)
            self._rawWrite((0x500<<2)|(0x14 << 2),0x0)
            # Power Up after the configuration load
            self.Page0.PDN.set(False)
            # Clear the internal error flags
            self.Page0.ClearIntErrFlag()
        ##############################
        # Devices
        ##############################
        self.add(silabs.Si5345Page0(offset=(0x000<<2),simpleDisplay=simpleDisplay,expand=False))
        self.add(pr.LinkVariable(
            name = 'Locked',
            description = 'Inverse of LOL',
            mode = 'RO',
            dependencies = [self.Page0.LOL],
            linkedGet = lambda: (False if self.Page0.LOL.value() else True)
        ))
| 2,601 | 7 | 49 |
1453a426e851b72a4844eed7230b164334c62ea5 | 620 | py | Python | thinkpython_allen_downey/exercise_6_2.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | thinkpython_allen_downey/exercise_6_2.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | thinkpython_allen_downey/exercise_6_2.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | # def hypotenuse(x, y):
# return 0.0
#
# print(hypotenuse(3, 4))
#
# def hypotenuse(x, y):
# square_x = x**2
# square_y = y**2
# print('square_x is', square_x)
# print('square_y is', square_y)
# return 0.0
#
# print(hypotenuse(3, 4))
#
# def hypotenuse(x, y):
# from math import sqrt
# square_x = x**2
# square_y = y**2
# h_square = square_x + square_y
# print('hypotenuse square is', h_square)
# result = sqrt(h_square)
# return result
#
# print(hypotenuse(3, 4))
print(hypotenuse(3, 4))
| 20 | 45 | 0.58871 | # def hypotenuse(x, y):
# return 0.0
#
# print(hypotenuse(3, 4))
#
# def hypotenuse(x, y):
# square_x = x**2
# square_y = y**2
# print('square_x is', square_x)
# print('square_y is', square_y)
# return 0.0
#
# print(hypotenuse(3, 4))
#
# def hypotenuse(x, y):
# from math import sqrt
# square_x = x**2
# square_y = y**2
# h_square = square_x + square_y
# print('hypotenuse square is', h_square)
# result = sqrt(h_square)
# return result
#
# print(hypotenuse(3, 4))
def hypotenuse(x, y):
    """Return the length of the hypotenuse of a right triangle with legs x, y.

    Uses math.hypot, which is equivalent to sqrt(x**2 + y**2) but more robust
    against intermediate overflow/underflow for very large or tiny legs.
    """
    from math import hypot
    return hypot(x, y)
print(hypotenuse(3, 4))
| 55 | 0 | 23 |
d7e66a021ecddeb23f0dbeb0f0551cb3b5f1cf3d | 1,074 | py | Python | ecosystem/plot_stargazers.py | sealuzh/docker-ecosystem-paper | 5c8b253062796baf5d154bc6f9660a7d05d3dad5 | [
"Apache-2.0"
] | 5 | 2017-05-19T15:41:46.000Z | 2021-08-03T16:52:56.000Z | ecosystem/plot_stargazers.py | sealuzh/docker-ecosystem-paper | 5c8b253062796baf5d154bc6f9660a7d05d3dad5 | [
"Apache-2.0"
] | 1 | 2019-11-18T09:26:23.000Z | 2019-11-18T09:26:23.000Z | ecosystem/plot_stargazers.py | sealuzh/docker-ecosystem-paper | 5c8b253062796baf5d154bc6f9660a7d05d3dad5 | [
"Apache-2.0"
] | 1 | 2017-05-20T13:54:14.000Z | 2017-05-20T13:54:14.000Z | #!/usr/bin/env python
# Plots stargazers of repositories.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KernelDensity
# Based on: https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with Scikit-learn.

    x: 1-D array of samples (reshaped to a column for sklearn).
    x_grid: 1-D array of points at which to evaluate the density.
    bandwidth / **kwargs: forwarded to sklearn.neighbors.KernelDensity.
    Returns the density (not log-density) evaluated on x_grid.
    """
    kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
    kde_skl.fit(x[:, np.newaxis])
    # score_samples() returns the log-likelihood of the samples
    log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
    return np.exp(log_pdf)
# read CSV with base image count:
df = pd.read_csv('./data/stargazers.csv').sort_values('stargazers', ascending=True)
plot_data = [df['stargazers']]
# Evaluation grid for the KDE curve (1..40000 stargazers).
grid = np.linspace(1, 40000, 5000)
fig, ax = plt.subplots()
for data in plot_data:
    ax.plot(grid, kde_sklearn(data, grid, bandwidth=50), alpha=0.8)
# NOTE(review): only one series is plotted but three legend labels are
# declared -- presumably leftover from earlier Top-1000/Top-100 series.
ax.legend(labels=['Overall', 'Top 1000', 'Top 100'])
ax.legend(loc='upper left')
ax.set_xlabel('Project stargazers')
# ax.set_yscale('log')
plt.show() | 28.263158 | 83 | 0.72905 | #!/usr/bin/env python
# Plots stargazers of repositories.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import KernelDensity
# Based on: https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with Scikit-learn.

    Fits a KernelDensity model to the samples in x and returns the estimated
    density (exp of the log-likelihood) evaluated at each point of x_grid.
    """
    estimator = KernelDensity(bandwidth=bandwidth, **kwargs)
    estimator.fit(x[:, np.newaxis])
    # score_samples() yields log-density; convert back to density.
    log_density = estimator.score_samples(x_grid[:, np.newaxis])
    return np.exp(log_density)
# read CSV with base image count:
df = pd.read_csv('./data/stargazers.csv').sort_values('stargazers', ascending=True)
plot_data = [df['stargazers']]
# Evaluation grid for the KDE curve (1..40000 stargazers).
grid = np.linspace(1, 40000, 5000)
fig, ax = plt.subplots()
for data in plot_data:
    ax.plot(grid, kde_sklearn(data, grid, bandwidth=50), alpha=0.8)
# NOTE(review): only one series is plotted but three legend labels are
# declared -- presumably leftover from earlier Top-1000/Top-100 series.
ax.legend(labels=['Overall', 'Top 1000', 'Top 100'])
ax.legend(loc='upper left')
ax.set_xlabel('Project stargazers')
# ax.set_yscale('log')
# ax.set_ylim(-0.5, 5)
plt.show() | 0 | 0 | 0 |
bcd58dc314a69f1b85201cceac9b86fb58297c42 | 21,978 | py | Python | contrail-topology/contrail_topology/controller.py | biswajit-mandal/contrail-analytics | 393157153c223925d1dabdc2e173da90ab61aa50 | [
"Apache-2.0"
] | null | null | null | contrail-topology/contrail_topology/controller.py | biswajit-mandal/contrail-analytics | 393157153c223925d1dabdc2e173da90ab61aa50 | [
"Apache-2.0"
] | null | null | null | contrail-topology/contrail_topology/controller.py | biswajit-mandal/contrail-analytics | 393157153c223925d1dabdc2e173da90ab61aa50 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from analytic_client import AnalyticApiClient
import time, socket, os
from topology_uve import LinkUve
import gevent
from gevent.lock import Semaphore
from opserver.consistent_schdlr import ConsistentScheduler
from topology_config_handler import TopologyConfigHandler
import traceback
import ConfigParser
import signal
import random
import hashlib
from sandesh.topology_info.ttypes import TopologyInfo, TopologyUVE
from sandesh.link.ttypes import RemoteType, RemoteIfInfo, VRouterL2IfInfo,\
VRouterL2IfUVE
| 47.16309 | 105 | 0.503231 | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from analytic_client import AnalyticApiClient
import time, socket, os
from topology_uve import LinkUve
import gevent
from gevent.lock import Semaphore
from opserver.consistent_schdlr import ConsistentScheduler
from topology_config_handler import TopologyConfigHandler
import traceback
import ConfigParser
import signal
import random
import hashlib
from sandesh.topology_info.ttypes import TopologyInfo, TopologyUVE
from sandesh.link.ttypes import RemoteType, RemoteIfInfo, VRouterL2IfInfo,\
VRouterL2IfUVE
class PRouter(object):
    """Lightweight record pairing a physical router's name with its UVE data.

    name: the prouter's name as listed by the analytics API.
    data: the dict returned for that prouter (e.g. its PRouterEntry content).
    """
    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __repr__(self):
        # Added for debuggability; shows only the name to keep output short.
        return 'PRouter(%r)' % (self.name,)
class Controller(object):
    def __init__(self, config):
        """Wire up the topology controller: analytics client, UVE sender,
        config handler, and the ZooKeeper-backed consistent scheduler.

        config: the parsed topology configuration object (provides
        collectors(), frequency(), zookeeper_server(), etc.).
        """
        self._config = config
        self._hostname = socket.gethostname()
        self.analytic_api = AnalyticApiClient(self._config)
        self._config.random_collectors = self._config.collectors()
        # Checksum of the collector list; used elsewhere to detect config
        # changes (e.g. on SIGHUP).
        self._chksum = ""
        if self._config.collectors():
            self._chksum = hashlib.md5("".join(self._config.collectors())).hexdigest()
            # Randomize collector order so load spreads across collectors.
            self._config.random_collectors = random.sample(self._config.collectors(), \
                len(self._config.collectors()))
        self.uve = LinkUve(self._config)
        self._sandesh = self.uve.sandesh_instance()
        self._logger = self.uve.logger()
        # Initialize self._sleep_time from the configured frequency.
        self.sleep_time()
        self._sem = Semaphore()
        self._members = None
        self._partitions = None
        self._prouters = {}
        self._vrouter_l2ifs = {}
        self._old_vrouter_l2ifs = {}
        self._config_handler = TopologyConfigHandler(self._sandesh,
            self._config.rabbitmq_params(), self._config.cassandra_params())
        self.constnt_schdlr = ConsistentScheduler(self.uve._moduleid,
            zookeeper=self._config.zookeeper_server(),
            delete_hndlr=self._del_uves, logger=self._logger,
            cluster_id=self._config.cluster_id())
def sleep_time(self, newtime=None):
if newtime:
self._sleep_time = newtime
else:
self._sleep_time = self._config.frequency()
return self._sleep_time
def get_vrouters(self):
self.analytic_api.get_vrouters(True)
self.vrouters = {}
self.vrouter_ips = {}
self.vrouter_macs = {}
for vr in self.analytic_api.list_vrouters():
cfilt = ['VrouterAgent:phy_if', 'VrouterAgent:self_ip_list',
'VRouterL2IfInfo']
try:
d = self.analytic_api.get_vrouter(vr, ','.join(cfilt))
except Exception as e:
traceback.print_exc()
print str(e)
d = {}
if 'VrouterAgent' not in d or\
'self_ip_list' not in d['VrouterAgent'] or\
'phy_if' not in d['VrouterAgent']:
continue
self.vrouters[vr] = {'ips': d['VrouterAgent']['self_ip_list'],
'if': d['VrouterAgent']['phy_if']
}
try:
self.vrouters[vr]['l2_if'] = d['VRouterL2IfInfo']['if_info']
except KeyError:
pass
for ip in d['VrouterAgent']['self_ip_list']:
self.vrouter_ips[ip] = vr # index
for intf in d['VrouterAgent']['phy_if']:
try:
self.vrouter_macs[intf['mac_address']] = {}
self.vrouter_macs[intf['mac_address']]['vrname'] = vr
self.vrouter_macs[intf['mac_address']]['ifname'] = intf['name']
except:
continue
def get_prouters(self):
self.analytic_api.get_prouters(True)
self.prouters = []
for pr in self.analytic_api.list_prouters():
try:
data = self.analytic_api.get_prouter(pr, 'PRouterEntry')
if data:
self.prouters.append(PRouter(pr, data))
except Exception as e:
traceback.print_exc()
print str(e)
def _is_linkup(self, prouter, ifindex):
if 'PRouterEntry' in prouter.data and \
'ifIndexOperStatusTable' in prouter.data['PRouterEntry']:
status = filter(lambda x: x['ifIndex'] == ifindex,
prouter.data['PRouterEntry']['ifIndexOperStatusTable'])
if status and status[0]['ifOperStatus'] == 1:
return True
return False
def _add_link(self, prouter, remote_system_name, local_interface_name,
remote_interface_name, local_interface_index,
remote_interface_index, link_type):
# If the remote_system_name or remote_interface_name is None, do not
# add this link in the link_table.
if not all([remote_system_name, remote_interface_name]):
return False
d = dict(remote_system_name=remote_system_name,
local_interface_name=local_interface_name,
remote_interface_name=remote_interface_name,
local_interface_index=local_interface_index,
remote_interface_index=remote_interface_index,
type=link_type)
if link_type == RemoteType.VRouter:
l2_if = self.vrouters[remote_system_name].get('l2_if')
if l2_if and remote_interface_name in l2_if:
if l2_if[remote_interface_name]['remote_system_name'] != \
prouter.name:
return False
if self._is_linkup(prouter, local_interface_index):
if prouter.name in self.link:
self.link[prouter.name].append(d)
else:
self.link[prouter.name] = [d]
return True
return False
def _chk_lnk(self, pre, index):
if 'ifIndexOperStatusTable' in pre:
for d in pre['ifIndexOperStatusTable']:
if d['ifIndex'] == index:
return d['ifOperStatus'] == 1
return False
def _send_topology_uve(self, members, partitions, prouters):
topology_info = TopologyInfo()
if self._members != members:
self._members = members
topology_info.members = members
if self._partitions != partitions:
self._partitions = partitions
topology_info.partitions = partitions
new_prouters = {p.name: p for p in prouters}
if self._prouters.keys() != new_prouters.keys():
deleted_prouters = [v for p, v in self._prouters.iteritems() \
if p not in new_prouters]
self._del_uves(deleted_prouters)
self._prouters = new_prouters
topology_info.prouters = self._prouters.keys()
if topology_info != TopologyInfo():
topology_info.name = self._hostname
TopologyUVE(data=topology_info).send()
# end _send_topology_uve
def bms_links(self, prouter, ifm):
try:
for lif_fqname, lif in self._config_handler.get_logical_interfaces():
if prouter.name in lif_fqname:
for vmif in lif.obj.get_virtual_machine_interface_refs():
vmi = self._config_handler.\
get_virtual_machine_interface(fq_name=None,
uuid=vmif['uuid'])
if not vmi:
continue
vmi = vmi.obj
for mc in vmi.virtual_machine_interface_mac_addresses.\
get_mac_address():
ifi = [k for k in ifm if ifm[k] in lif_fqname][0]
rsys = '-'.join(['bms', 'host'] + mc.split(':'))
self._add_link(prouter=prouter,
remote_system_name=rsys,
local_interface_name=lif.obj.fq_name[-1],
remote_interface_name='em0',#no idea
local_interface_index=ifi,
remote_interface_index=1, #dont know TODO:FIX
link_type=RemoteType.BMS)
except:
traceback.print_exc()
def compute(self):
self.link = {}
self._old_vrouter_l2ifs = self._vrouter_l2ifs
self._vrouter_l2ifs = {}
for prouter in self.constnt_schdlr.work_items():
pr, d = prouter.name, prouter.data
if 'PRouterEntry' not in d or 'ifTable' not in d['PRouterEntry']:
continue
self.link[pr] = []
lldp_ints = []
ifm = dict(map(lambda x: (x['ifIndex'], x['ifDescr']),
d['PRouterEntry']['ifTable']))
self.bms_links(prouter, ifm)
for pl in d['PRouterEntry']['lldpTable']['lldpRemoteSystemsData']:
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Cisco'):
loc_pname = [x for x in d['PRouterEntry']['lldpTable'][
'lldpLocalSystemData']['lldpLocPortTable'] if x[
'lldpLocPortNum'] == pl['lldpRemLocalPortNum']][
0]['lldpLocPortDesc']
pl['lldpRemLocalPortNum'] = [k for k in ifm if ifm[
k] == loc_pname][0]
elif d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
loc_pname = [x for x in d['PRouterEntry']['lldpTable'][
'lldpLocalSystemData']['lldpLocPortTable'] if x[
'lldpLocPortNum'] == pl['lldpRemLocalPortNum']][
0]['lldpLocPortId']
pl['lldpRemLocalPortNum'] = [k for k in ifm if ifm[
k] == loc_pname][0]
if pl['lldpRemLocalPortNum'] in ifm and self._chk_lnk(
d['PRouterEntry'], pl['lldpRemLocalPortNum']):
if pl['lldpRemPortId'].isdigit():
rii = int(pl['lldpRemPortId'])
else:
try:
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
rpn = filter(lambda y: y['lldpLocPortId'] == pl[
'lldpRemPortId'], [
x for x in self.prouters if x.name == pl[
'lldpRemSysName']][0].data['PRouterEntry'][
'lldpTable']['lldpLocalSystemData'][
'lldpLocPortTable'])[0]['lldpLocPortId']
else:
rpn = filter(lambda y: y['lldpLocPortId'] == pl[
'lldpRemPortId'], [
x for x in self.prouters if x.name == pl[
'lldpRemSysName']][0].data['PRouterEntry'][
'lldpTable']['lldpLocalSystemData'][
'lldpLocPortTable'])[0]['lldpLocPortDesc']
rii = filter(lambda y: y['ifDescr'] == rpn,
[ x for x in self.prouters \
if x.name == pl['lldpRemSysName']][0].data[
'PRouterEntry']['ifTable'])[0]['ifIndex']
except:
rii = 0
if d['PRouterEntry']['lldpTable']['lldpLocalSystemData'][
'lldpLocSysDesc'].startswith('Arista'):
if self._add_link(
prouter=prouter,
remote_system_name=pl['lldpRemSysName'],
local_interface_name=ifm[pl['lldpRemLocalPortNum']],
remote_interface_name=pl['lldpRemPortId'],
local_interface_index=pl['lldpRemLocalPortNum'],
remote_interface_index=rii,
link_type=RemoteType.PRouter):
lldp_ints.append(ifm[pl['lldpRemLocalPortNum']])
else:
if self._add_link(
prouter=prouter,
remote_system_name=pl['lldpRemSysName'],
local_interface_name=ifm[pl['lldpRemLocalPortNum']],
remote_interface_name=pl['lldpRemPortDesc'],
local_interface_index=pl['lldpRemLocalPortNum'],
remote_interface_index=rii,
link_type=RemoteType.PRouter):
lldp_ints.append(ifm[pl['lldpRemLocalPortNum']])
vrouter_l2ifs = {}
if 'fdbPortIfIndexTable' in d['PRouterEntry']:
dot1d2snmp = map (lambda x: (
x['dot1dBasePortIfIndex'],
x['snmpIfIndex']),
d['PRouterEntry']['fdbPortIfIndexTable'])
dot1d2snmp_dict = dict(dot1d2snmp)
if 'fdbPortTable' in d['PRouterEntry']:
for mac_entry in d['PRouterEntry']['fdbPortTable']:
if mac_entry['mac'] in self.vrouter_macs:
vrouter_mac_entry = self.vrouter_macs[mac_entry['mac']]
vr_name = vrouter_mac_entry['vrname']
vr_ifname = vrouter_mac_entry['ifname']
fdbport = mac_entry['dot1dBasePortIfIndex']
try:
snmpport = dot1d2snmp_dict[fdbport]
ifname = ifm[snmpport]
except:
continue
is_lldp_int = any(ifname == lldp_int for lldp_int in lldp_ints)
if is_lldp_int:
continue
if self._add_link(
prouter=prouter,
remote_system_name=vr_name,
local_interface_name=ifname,
remote_interface_name=vr_ifname,
local_interface_index=snmpport,
remote_interface_index=1, #dont know TODO:FIX
link_type=RemoteType.VRouter):
if vr_name not in vrouter_l2ifs:
vrouter_l2ifs[vr_name] = {}
vrouter_l2ifs[vr_name][vr_ifname] = {
'remote_system_name': prouter.name,
'remote_if_name': ifname,
}
for arp in d['PRouterEntry']['arpTable']:
if arp['ip'] in self.vrouter_ips:
if arp['mac'] in map(lambda x: x['mac_address'],
self.vrouters[self.vrouter_ips[arp['ip']]]['if']):
vr_name = self.vrouter_macs[arp['mac']]['vrname']
vr_ifname = self.vrouter_macs[arp['mac']]['ifname']
try:
if vrouter_l2ifs[vr_name][vr_ifname]\
['remote_system_name'] == prouter.name:
del vrouter_l2ifs[vr_name][vr_ifname]
if not vrouter_l2ifs[vr_name]:
del vrouter_l2ifs[vr_name]
continue
except KeyError:
pass
if ifm[arp['localIfIndex']].startswith('vlan'):
continue
if ifm[arp['localIfIndex']].startswith('irb'):
continue
is_lldp_int = any(ifm[arp['localIfIndex']] == lldp_int for lldp_int in lldp_ints)
if is_lldp_int:
continue
if self._add_link(
prouter=prouter,
remote_system_name=vr_name,
local_interface_name=ifm[arp['localIfIndex']],
remote_interface_name=vr_ifname,
local_interface_index=arp['localIfIndex'],
remote_interface_index=1, #dont know TODO:FIX
link_type=RemoteType.VRouter):
pass
for vr, intf in vrouter_l2ifs.iteritems():
if vr in self._vrouter_l2ifs:
self._vrouter_l2ifs[vr].update(vrouter_l2ifs[vr])
else:
self._vrouter_l2ifs[vr] = intf
def send_uve(self):
old_vrs = set(self._old_vrouter_l2ifs.keys())
new_vrs = set(self._vrouter_l2ifs.keys())
del_vrs = old_vrs - new_vrs
add_vrs = new_vrs - old_vrs
same_vrs = old_vrs.intersection(new_vrs)
for vr in del_vrs:
vr_l2info = VRouterL2IfInfo(name=vr, deleted=True)
VRouterL2IfUVE(data=vr_l2info).send()
for vr in add_vrs:
if_info = {}
for vrif, remif_info in self._vrouter_l2ifs[vr].iteritems():
if_info[vrif] = RemoteIfInfo(remif_info['remote_system_name'],
remif_info['remote_if_name'])
vr_l2info = VRouterL2IfInfo(name=vr, if_info=if_info)
VRouterL2IfUVE(data=vr_l2info).send()
for vr in same_vrs:
if self._vrouter_l2ifs[vr] != self._old_vrouter_l2ifs[vr]:
if_info = {}
for vrif, remif_info in self._vrouter_l2ifs[vr].iteritems():
if_info[vrif] = RemoteIfInfo(
remif_info['remote_system_name'],
remif_info['remote_if_name'])
vr_l2info = VRouterL2IfInfo(name=vr, if_info=if_info)
VRouterL2IfUVE(data=vr_l2info).send()
self.uve.send(self.link)
def switcher(self):
gevent.sleep(0)
def scan_data(self):
t = []
t.append(gevent.spawn(self.get_vrouters))
t.append(gevent.spawn(self.get_prouters))
gevent.joinall(t)
def _del_uves(self, prouters):
with self._sem:
for prouter in prouters:
self.uve.delete(prouter.name)
def sighup_handler(self):
if self._config._args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read(self._config._args.conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
if type(collectors) is str:
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
self._config.random_collectors = \
random.sample(collectors, len(collectors))
# Reconnect to achieve load-balance irrespective of list
self.uve.sandesh_reconfig_collectors(
self._config.random_collectors)
except ConfigParser.NoOptionError as e:
pass
# end sighup_handler
def _uve_scanner(self):
while True:
self.scan_data()
if self.constnt_schdlr.schedule(self.prouters):
members = self.constnt_schdlr.members()
partitions = self.constnt_schdlr.partitions()
self._send_topology_uve(members, partitions,
self.constnt_schdlr.work_items())
try:
with self._sem:
self.compute()
self.send_uve()
except Exception as e:
traceback.print_exc()
print str(e)
gevent.sleep(self._sleep_time)
else:
gevent.sleep(1)
# end _uve_scanner
def run(self):
""" @sighup
SIGHUP handler to indicate configuration changes
"""
gevent.signal(signal.SIGHUP, self.sighup_handler)
self.gevs = [
gevent.spawn(self._config_handler.start),
gevent.spawn(self._uve_scanner)
]
try:
gevent.joinall(self.gevs)
except KeyboardInterrupt:
self._logger.error('Exiting on ^C')
except gevent.GreenletExit:
self._logger.error('Exiting on gevent-kill')
finally:
self._logger.error('stopping everything!')
self.stop()
# end run
def stop(self):
self.uve.stop()
l = len(self.gevs)
for i in range(0, l):
self._logger.error('killing %d of %d' % (i+1, l))
self.gevs[0].kill()
self._logger.error('joining %d of %d' % (i+1, l))
self.gevs[0].join()
self._logger.error('stopped %d of %d' % (i+1, l))
self.gevs.pop(0)
self.constnt_schdlr.finish()
# end stop
| 20,135 | 1,191 | 72 |
f31a318e5b8203dfd8fa0fa413989c87bccad9bd | 2,821 | py | Python | scratch.py | derekvantilborg/molml_tools | 5a5baaa21a4b3b91e59c1a350d04db3fd5102e4e | [
"MIT"
] | null | null | null | scratch.py | derekvantilborg/molml_tools | 5a5baaa21a4b3b91e59c1a350d04db3fd5102e4e | [
"MIT"
] | null | null | null | scratch.py | derekvantilborg/molml_tools | 5a5baaa21a4b3b91e59c1a350d04db3fd5102e4e | [
"MIT"
] | null | null | null | # conda install scikit-learn
# conda install -c conda-forge scikit-optimize
# conda install -c conda-forge rdkit
import pandas as pd
# from Tools.Clustering.butina import cluster_molecules
from molml.Datastructures.molecule import Dataset
from molml.Data import read_csv
from molml.Representations.descriptors import ecfp
from molml.Representations.strings import smiles_one_hot
from sklearn.ensemble import GradientBoostingRegressor
from molml.Tools.optimize import BayesianOpt
from molml.Tools.metrics import rmse
import numpy as np
molecules = read_csv(f"example_data/CHEMBL2047_EC50.csv", smiles_col='smiles', label_col='exp_mean [nM]')
data = Dataset(molecules[:50], name='CHEMBL2047', transform=smiles_one_hot, target_transform=minlog)
data.process()
data.show(10)
from molml.Tools.cluster import spectral
from molml.Viz.multivariate import TSNE, PCA
import seaborn as sns
clusters = spectral(molecules, k=10)
tsne = TSNE(n_components=2, perplexity=50, n_iter=500)
tsne.fit(molecules, use_n_principal_components=50)
tsne.show(color_by=clusters, palette=sns.color_palette("hls", 10))
pca = PCA(n_components=2)
pca.fit(molecules)
pca.show(color_by=clusters, palette=sns.color_palette("hls", 10))
from molml.Tools.splitting import stratified_split_molecules
train, test, val = stratified_split_molecules(molecules, labels=clusters)
data = Dataset(molecules, name='CHEMBL2047', transform=ecfp, target_transform=minlog)
data.process()
data.show(13)
hpm = {"learning_rate": [0.1, 0.01],
"max_depth": [1, 2, 3, 4, 5, 6, 7, 8],
"n_estimators": [5, 10, 20, 100, 200, 300]}
model = GradientBoostingRegressor
opt = BayesianOpt(model, data)
opt.opt(hpm, rmse, cv=5, n_calls=20)
opt.show()
# def fold_split_knn(dataset, k: int = 10, random_state: int = 42):
# from sklearn.cluster import KMeans
#
# clust = KMeans(n_clusters=10)
# clust.fit(x)
history = [(1,0.7201,0.7201),(2,0.6329,0.6329),(3,0.6305,0.6305),(4,0.6323,0.6305),(5,0.7195,0.6305),(6,0.6137,0.6137),
(7,0.6201,0.6137),(8,0.6239,0.6137),(9,0.6404,0.6137),(10,0.6264,0.6137),(11,0.6718,0.6137),(12,0.6368,0.6137),
(13,0.6337,0.6137),(14,0.6502,0.6137),(15,0.6235,0.6137),(16,0.6303,0.6137),(17,0.6171,0.6137),(18,0.6268,0.6137),
(19,0.6117,0.6117),(20,0.6170,0.6117)]
history = pd.DataFrame( columns=['Iteration', 'Score', 'Best Score'])
history['Score'].tolist()[-1]
len(history['Score'])
pd.DataFrame({'Iteration': [21], 'Score': [0.544], 'Best Score': [0.544]})
## TODO active learning
# split data train test -> make TSNE
# optimize model on train
# train model
# predict on test
# find most uncertain compounds
#
# python setup.py bdist_wheel
# python -m pip install dist/MoleculeACE-1.0.5-py3-none-any.whl
#
# twine upload dist/*
| 28.785714 | 125 | 0.720312 | # conda install scikit-learn
# conda install -c conda-forge scikit-optimize
# conda install -c conda-forge rdkit
import pandas as pd
# from Tools.Clustering.butina import cluster_molecules
from molml.Datastructures.molecule import Dataset
from molml.Data import read_csv
from molml.Representations.descriptors import ecfp
from molml.Representations.strings import smiles_one_hot
from sklearn.ensemble import GradientBoostingRegressor
from molml.Tools.optimize import BayesianOpt
from molml.Tools.metrics import rmse
import numpy as np
def minlog(x):
return -np.log10(x)
molecules = read_csv(f"example_data/CHEMBL2047_EC50.csv", smiles_col='smiles', label_col='exp_mean [nM]')
data = Dataset(molecules[:50], name='CHEMBL2047', transform=smiles_one_hot, target_transform=minlog)
data.process()
data.show(10)
from molml.Tools.cluster import spectral
from molml.Viz.multivariate import TSNE, PCA
import seaborn as sns
clusters = spectral(molecules, k=10)
tsne = TSNE(n_components=2, perplexity=50, n_iter=500)
tsne.fit(molecules, use_n_principal_components=50)
tsne.show(color_by=clusters, palette=sns.color_palette("hls", 10))
pca = PCA(n_components=2)
pca.fit(molecules)
pca.show(color_by=clusters, palette=sns.color_palette("hls", 10))
from molml.Tools.splitting import stratified_split_molecules
train, test, val = stratified_split_molecules(molecules, labels=clusters)
data = Dataset(molecules, name='CHEMBL2047', transform=ecfp, target_transform=minlog)
data.process()
data.show(13)
hpm = {"learning_rate": [0.1, 0.01],
"max_depth": [1, 2, 3, 4, 5, 6, 7, 8],
"n_estimators": [5, 10, 20, 100, 200, 300]}
model = GradientBoostingRegressor
opt = BayesianOpt(model, data)
opt.opt(hpm, rmse, cv=5, n_calls=20)
opt.show()
# def fold_split_knn(dataset, k: int = 10, random_state: int = 42):
# from sklearn.cluster import KMeans
#
# clust = KMeans(n_clusters=10)
# clust.fit(x)
history = [(1,0.7201,0.7201),(2,0.6329,0.6329),(3,0.6305,0.6305),(4,0.6323,0.6305),(5,0.7195,0.6305),(6,0.6137,0.6137),
(7,0.6201,0.6137),(8,0.6239,0.6137),(9,0.6404,0.6137),(10,0.6264,0.6137),(11,0.6718,0.6137),(12,0.6368,0.6137),
(13,0.6337,0.6137),(14,0.6502,0.6137),(15,0.6235,0.6137),(16,0.6303,0.6137),(17,0.6171,0.6137),(18,0.6268,0.6137),
(19,0.6117,0.6117),(20,0.6170,0.6117)]
history = pd.DataFrame( columns=['Iteration', 'Score', 'Best Score'])
history['Score'].tolist()[-1]
len(history['Score'])
pd.DataFrame({'Iteration': [21], 'Score': [0.544], 'Best Score': [0.544]})
## TODO active learning
# split data train test -> make TSNE
# optimize model on train
# train model
# predict on test
# find most uncertain compounds
#
# python setup.py bdist_wheel
# python -m pip install dist/MoleculeACE-1.0.5-py3-none-any.whl
#
# twine upload dist/*
| 17 | 0 | 23 |
8a643272ae06be634a32e0ab7072e549a34dede7 | 2,217 | py | Python | 4/vendor/gistfile1.py | JarryShaw/HelloWorld | 669984fa415e9bb65f5b7c261ec4f87ffbe56c6d | [
"Apache-2.0"
] | 1 | 2017-12-22T14:15:08.000Z | 2017-12-22T14:15:08.000Z | 4/vendor/gistfile1.py | JarryShaw/HelloWorld | 669984fa415e9bb65f5b7c261ec4f87ffbe56c6d | [
"Apache-2.0"
] | 1 | 2018-01-16T09:22:52.000Z | 2018-01-16T09:22:52.000Z | 4/vendor/gistfile1.py | JarryShaw/HelloWorld | 669984fa415e9bb65f5b7c261ec4f87ffbe56c6d | [
"Apache-2.0"
] | 1 | 2018-01-16T07:50:00.000Z | 2018-01-16T07:50:00.000Z | alphabet = "0123456789."
code = input()
grid = []
variables = []
loops = 10
for i in range(100):
grid.append(00)
while code[0] != "3" or code[1] != "." or code[-1] != "4":
code = input("Code invalid. ")
code += "000000"
i = 2
while i < len(code) - 6:
variables = []
variables.append(int(code[i+1] + code[i+2]))
variables.append(int(code[i+3] + code[i+4]))
variables.append(int(code[i+5] + code[i+6]))
if code[i] == "0":
grid[variables[0]] = grid[variables[1]] + grid[variables[2]]
i += 7
elif code[i] == "1":
grid[variables[0]] = grid[variables[1]] - grid[variables[2]]
i += 7
elif code[i] == "2":
grid[variables[0]] = grid[variables[1]] * grid[variables[2]]
i += 7
elif code[i] == "3":
grid[variables[0]] = grid[variables[1]] / grid[variables[2]]
i += 7
elif code[i] == "4":
i = len(code)
elif code[i] == "5":
print(chr(grid[variables[0]]),end='')
i += 3
elif code[i] == "6":
grid[variables[0]] = variables[1]
i += 5
elif code[i] == "7":
grid[variables[0]] = ord(input())
i += 3
elif code[i] == "8":
if grid[variables[0]] == 0:
found = False
nests = 0
while found == False:
i += 1
if code[i] == "8":
nests += 1
elif code[i] == "9":
if nests == 0:
i += 1
found = True
else:
nests -= 1
elif grid[variables[0]] != 0:
i += 1
found = True
elif code[i] == "9":
storei = i
nests = 0
returned = False
while returned == False:
i -= 1
if code[i] == "9":
nests += 1
elif code[i] == "8":
if nests == 0:
if grid[int(str(code[i+1]) + str(code[i+2]))] == 0:
i = storei
returned = True
else:
returned = True
else:
print("Error found with character " + code[i])
| 28.063291 | 71 | 0.412269 | alphabet = "0123456789."
code = input()
grid = []
variables = []
loops = 10
for i in range(100):
grid.append(00)
while code[0] != "3" or code[1] != "." or code[-1] != "4":
code = input("Code invalid. ")
code += "000000"
i = 2
while i < len(code) - 6:
variables = []
variables.append(int(code[i+1] + code[i+2]))
variables.append(int(code[i+3] + code[i+4]))
variables.append(int(code[i+5] + code[i+6]))
if code[i] == "0":
grid[variables[0]] = grid[variables[1]] + grid[variables[2]]
i += 7
elif code[i] == "1":
grid[variables[0]] = grid[variables[1]] - grid[variables[2]]
i += 7
elif code[i] == "2":
grid[variables[0]] = grid[variables[1]] * grid[variables[2]]
i += 7
elif code[i] == "3":
grid[variables[0]] = grid[variables[1]] / grid[variables[2]]
i += 7
elif code[i] == "4":
i = len(code)
elif code[i] == "5":
print(chr(grid[variables[0]]),end='')
i += 3
elif code[i] == "6":
grid[variables[0]] = variables[1]
i += 5
elif code[i] == "7":
grid[variables[0]] = ord(input())
i += 3
elif code[i] == "8":
if grid[variables[0]] == 0:
found = False
nests = 0
while found == False:
i += 1
if code[i] == "8":
nests += 1
elif code[i] == "9":
if nests == 0:
i += 1
found = True
else:
nests -= 1
elif grid[variables[0]] != 0:
i += 1
found = True
elif code[i] == "9":
storei = i
nests = 0
returned = False
while returned == False:
i -= 1
if code[i] == "9":
nests += 1
elif code[i] == "8":
if nests == 0:
if grid[int(str(code[i+1]) + str(code[i+2]))] == 0:
i = storei
returned = True
else:
returned = True
else:
print("Error found with character " + code[i])
| 0 | 0 | 0 |
faaa8707ba2e3914afcd146434676d67944dc037 | 7,959 | py | Python | paasta_tools/paastaapi/models/marathon_autoscaling_info.py | rohangulati/paasta | 4539e39159424bfbdeddcb243ca337bcd1eb1a06 | [
"Apache-2.0"
] | null | null | null | paasta_tools/paastaapi/models/marathon_autoscaling_info.py | rohangulati/paasta | 4539e39159424bfbdeddcb243ca337bcd1eb1a06 | [
"Apache-2.0"
] | null | null | null | paasta_tools/paastaapi/models/marathon_autoscaling_info.py | rohangulati/paasta | 4539e39159424bfbdeddcb243ca337bcd1eb1a06 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from paasta_tools.paastaapi.configuration import Configuration
class MarathonAutoscalingInfo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'current_instances': 'int',
'current_utilization': 'float',
'max_instances': 'int',
'min_instances': 'int',
'target_instances': 'int'
}
attribute_map = {
'current_instances': 'current_instances',
'current_utilization': 'current_utilization',
'max_instances': 'max_instances',
'min_instances': 'min_instances',
'target_instances': 'target_instances'
}
def __init__(self, current_instances=None, current_utilization=None, max_instances=None, min_instances=None, target_instances=None, local_vars_configuration=None): # noqa: E501
"""MarathonAutoscalingInfo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._current_instances = None
self._current_utilization = None
self._max_instances = None
self._min_instances = None
self._target_instances = None
self.discriminator = None
if current_instances is not None:
self.current_instances = current_instances
if current_utilization is not None:
self.current_utilization = current_utilization
if max_instances is not None:
self.max_instances = max_instances
if min_instances is not None:
self.min_instances = min_instances
if target_instances is not None:
self.target_instances = target_instances
@property
def current_instances(self):
"""Gets the current_instances of this MarathonAutoscalingInfo. # noqa: E501
The number of instances of the service currently running # noqa: E501
:return: The current_instances of this MarathonAutoscalingInfo. # noqa: E501
:rtype: int
"""
return self._current_instances
@current_instances.setter
def current_instances(self, current_instances):
"""Sets the current_instances of this MarathonAutoscalingInfo.
The number of instances of the service currently running # noqa: E501
:param current_instances: The current_instances of this MarathonAutoscalingInfo. # noqa: E501
:type current_instances: int
"""
self._current_instances = current_instances
@property
def current_utilization(self):
"""Gets the current_utilization of this MarathonAutoscalingInfo. # noqa: E501
The current utilization of the instances' allocated resources # noqa: E501
:return: The current_utilization of this MarathonAutoscalingInfo. # noqa: E501
:rtype: float
"""
return self._current_utilization
@current_utilization.setter
def current_utilization(self, current_utilization):
"""Sets the current_utilization of this MarathonAutoscalingInfo.
The current utilization of the instances' allocated resources # noqa: E501
:param current_utilization: The current_utilization of this MarathonAutoscalingInfo. # noqa: E501
:type current_utilization: float
"""
self._current_utilization = current_utilization
@property
def max_instances(self):
"""Gets the max_instances of this MarathonAutoscalingInfo. # noqa: E501
The maximum number of instances that the autoscaler will scale to # noqa: E501
:return: The max_instances of this MarathonAutoscalingInfo. # noqa: E501
:rtype: int
"""
return self._max_instances
@max_instances.setter
def max_instances(self, max_instances):
"""Sets the max_instances of this MarathonAutoscalingInfo.
The maximum number of instances that the autoscaler will scale to # noqa: E501
:param max_instances: The max_instances of this MarathonAutoscalingInfo. # noqa: E501
:type max_instances: int
"""
self._max_instances = max_instances
@property
def min_instances(self):
"""Gets the min_instances of this MarathonAutoscalingInfo. # noqa: E501
The minimum number of instances that the autoscaler will scale to # noqa: E501
:return: The min_instances of this MarathonAutoscalingInfo. # noqa: E501
:rtype: int
"""
return self._min_instances
@min_instances.setter
def min_instances(self, min_instances):
"""Sets the min_instances of this MarathonAutoscalingInfo.
The minimum number of instances that the autoscaler will scale to # noqa: E501
:param min_instances: The min_instances of this MarathonAutoscalingInfo. # noqa: E501
:type min_instances: int
"""
self._min_instances = min_instances
@property
def target_instances(self):
"""Gets the target_instances of this MarathonAutoscalingInfo. # noqa: E501
The autoscaler's current target number of instances of this service to run # noqa: E501
:return: The target_instances of this MarathonAutoscalingInfo. # noqa: E501
:rtype: int
"""
return self._target_instances
@target_instances.setter
def target_instances(self, target_instances):
"""Sets the target_instances of this MarathonAutoscalingInfo.
The autoscaler's current target number of instances of this service to run # noqa: E501
:param target_instances: The target_instances of this MarathonAutoscalingInfo. # noqa: E501
:type target_instances: int
"""
self._target_instances = target_instances
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarathonAutoscalingInfo):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MarathonAutoscalingInfo):
return True
return self.to_dict() != other.to_dict()
| 33.868085 | 181 | 0.650333 | # coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from paasta_tools.paastaapi.configuration import Configuration
class MarathonAutoscalingInfo(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'current_instances': 'int',
'current_utilization': 'float',
'max_instances': 'int',
'min_instances': 'int',
'target_instances': 'int'
}
attribute_map = {
'current_instances': 'current_instances',
'current_utilization': 'current_utilization',
'max_instances': 'max_instances',
'min_instances': 'min_instances',
'target_instances': 'target_instances'
}
def __init__(self, current_instances=None, current_utilization=None, max_instances=None, min_instances=None, target_instances=None, local_vars_configuration=None): # noqa: E501
"""MarathonAutoscalingInfo - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._current_instances = None
self._current_utilization = None
self._max_instances = None
self._min_instances = None
self._target_instances = None
self.discriminator = None
if current_instances is not None:
self.current_instances = current_instances
if current_utilization is not None:
self.current_utilization = current_utilization
if max_instances is not None:
self.max_instances = max_instances
if min_instances is not None:
self.min_instances = min_instances
if target_instances is not None:
self.target_instances = target_instances
@property
def current_instances(self):
"""Gets the current_instances of this MarathonAutoscalingInfo. # noqa: E501
The number of instances of the service currently running # noqa: E501
:return: The current_instances of this MarathonAutoscalingInfo. # noqa: E501
:rtype: int
"""
return self._current_instances
@current_instances.setter
def current_instances(self, current_instances):
"""Sets the current_instances of this MarathonAutoscalingInfo.
The number of instances of the service currently running # noqa: E501
:param current_instances: The current_instances of this MarathonAutoscalingInfo. # noqa: E501
:type current_instances: int
"""
self._current_instances = current_instances
@property
def current_utilization(self):
    """Current utilization of the instances' allocated resources.

    :return: The current_utilization of this MarathonAutoscalingInfo.
    :rtype: float
    """
    return self._current_utilization

@current_utilization.setter
def current_utilization(self, current_utilization):
    """Record the current utilization of the instances' allocated resources.

    :param current_utilization: The current_utilization of this MarathonAutoscalingInfo.
    :type current_utilization: float
    """
    self._current_utilization = current_utilization
@property
def max_instances(self):
    """Maximum number of instances the autoscaler will scale to.

    :return: The max_instances of this MarathonAutoscalingInfo.
    :rtype: int
    """
    return self._max_instances

@max_instances.setter
def max_instances(self, max_instances):
    """Record the maximum number of instances the autoscaler will scale to.

    :param max_instances: The max_instances of this MarathonAutoscalingInfo.
    :type max_instances: int
    """
    self._max_instances = max_instances
@property
def min_instances(self):
    """Minimum number of instances the autoscaler will scale to.

    :return: The min_instances of this MarathonAutoscalingInfo.
    :rtype: int
    """
    return self._min_instances

@min_instances.setter
def min_instances(self, min_instances):
    """Record the minimum number of instances the autoscaler will scale to.

    :param min_instances: The min_instances of this MarathonAutoscalingInfo.
    :type min_instances: int
    """
    self._min_instances = min_instances
@property
def target_instances(self):
    """The autoscaler's current target number of instances of this service.

    :return: The target_instances of this MarathonAutoscalingInfo.
    :rtype: int
    """
    return self._target_instances

@target_instances.setter
def target_instances(self, target_instances):
    """Record the autoscaler's current target number of instances.

    :param target_instances: The target_instances of this MarathonAutoscalingInfo.
    :type target_instances: int
    """
    self._target_instances = target_instances
def to_dict(self):
    """Return the model's properties as a plain ``dict``.

    Nested models (anything exposing ``to_dict``) are recursively
    converted, including models stored inside lists and dict values.
    """
    result = {}
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Render the model as a pretty-printed string of its dict form."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Debug representation; mirrors :meth:`to_str` for `print`/`pprint`."""
    return self.to_str()
def __eq__(self, other):
    """Two models are equal when *other* is the same type with equal fields."""
    if isinstance(other, MarathonAutoscalingInfo):
        return self.to_dict() == other.to_dict()
    return False
def __ne__(self, other):
    """Inverse of :meth:`__eq__`; any non-model object compares unequal."""
    if isinstance(other, MarathonAutoscalingInfo):
        return self.to_dict() != other.to_dict()
    return True
| 0 | 0 | 0 |
7851d839dad479e83abf6f0e090c76610a28cb3d | 5,100 | py | Python | pymvg/test/test_first_principles.py | hop-soellingeraj/pymvg | 2b99ccb459063f34dbe801bdbbfcf1209b1fb3e5 | [
"MIT"
] | 84 | 2015-04-23T02:22:08.000Z | 2022-02-22T01:58:53.000Z | pymvg/test/test_first_principles.py | hop-soellingeraj/pymvg | 2b99ccb459063f34dbe801bdbbfcf1209b1fb3e5 | [
"MIT"
] | 8 | 2019-10-23T00:04:01.000Z | 2021-11-22T18:58:08.000Z | pymvg/test/test_first_principles.py | hop-soellingeraj/pymvg | 2b99ccb459063f34dbe801bdbbfcf1209b1fb3e5 | [
"MIT"
] | 18 | 2015-10-12T23:14:24.000Z | 2021-11-22T18:46:38.000Z | #!/usr/bin/env python
import numpy as np
from pymvg.test.utils import _build_points_3d, make_M
import os
from pymvg.util import normalize
from pymvg.camera_model import CameraModel
DRAW=int(os.environ.get('DRAW','0'))
if DRAW:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pymvg.plot_utils import plot_camera
if __name__=='__main__':
test_simple_projection()
test_lookat()
| 32.075472 | 80 | 0.604706 | #!/usr/bin/env python
import numpy as np
from pymvg.test.utils import _build_points_3d, make_M
import os
from pymvg.util import normalize
from pymvg.camera_model import CameraModel
DRAW=int(os.environ.get('DRAW','0'))
if DRAW:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pymvg.plot_utils import plot_camera
def test_lookat():
    """Build a camera with get_view_camera() and verify its extrinsics,
    camera-frame transform, and pixel projection against hand-computed values."""
    dist = 5.0
    # build camera
    center_expected = np.array( [10, 5, 20] )
    lookat_expected = center_expected + np.array( [dist, 0, 0] ) # looking in +X
    up_expected = np.array( [0, 0, 1] )
    f = 300.0 # focal length
    width, height = 640, 480
    cx, cy = width/2.0, height/2.0
    # Principal point at the image center, no skew.
    M = np.array( [[ f, 0, cx, 0],
                   [ 0, f, cy, 0],
                   [ 0, 0, 1, 0]])
    cam1 = CameraModel.load_camera_from_M( M, width=width, height=height)
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1
    # check that the extrinsic parameters were what we expected
    (center_actual,lookat_actual,up_actual) = cam.get_view()
    lookdir_expected = normalize( lookat_expected - center_expected )
    lookdir_actual = normalize( lookat_actual - center_actual )
    assert np.allclose( center_actual, center_expected )
    assert np.allclose( lookdir_actual, lookdir_expected )
    assert np.allclose( up_actual, up_expected )
    # check that the extrinsics work as expected
    pts = np.array([lookat_expected,
                    lookat_expected+up_expected])
    eye_actual = cam.project_3d_to_camera_frame( pts )
    eye_expected = [[0, 0, dist], # camera looks at +Z
                    [0,-1, dist], # with -Y as up
                    ]
    assert np.allclose( eye_actual, eye_expected )
    # now check some basics of the projection
    pix_actual = cam.project_3d_to_pixel( pts )
    pix_expected = [[cx,cy], # center pixel on the camera
                    [cx,cy-(f/dist)]]
    assert np.allclose( pix_actual, pix_expected )
def test_flip():
    """Yield (check_flip, distortion) pairs - nose-style generated test cases."""
    yield from ((check_flip, use_distortion) for use_distortion in (False, True))
def check_flip(distortion=False):
    """Check that a flipped camera has a different look direction but projects
    3D points to the same pixels.

    :param distortion: if True, build the camera with nonzero distortion coefficients
    """
    if distortion:
        d = [0.1, 0.2, 0.3, 0.4, 0.5]
    else:
        d = None
    # build camera
    center_expected = np.array( [10, 5, 20] )
    lookat_expected = center_expected + np.array( [1, 2, 0] )
    up_expected = np.array( [0, 0, 1] )
    width, height = 640, 480
    # Slightly asymmetric intrinsics (fx != fy, off-center principal point).
    M = np.array( [[ 300.0, 0, 321, 0],
                   [ 0, 298.0, 240, 0],
                   [ 0, 0, 1, 0]])
    cam1 = CameraModel.load_camera_from_M( M, width=width, height=height,
                                           distortion_coefficients=d )
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1
    pts = np.array([lookat_expected,
                    lookat_expected+up_expected,
                    [1,2,3],
                    [4,5,6]])
    pix_actual = cam.project_3d_to_pixel( pts )
    # Flipped camera gives same 3D->2D transform but different look direction.
    cf = cam.get_flipped_camera()
    assert not np.allclose( cam.get_lookat(), cf.get_lookat() )
    pix_actual_flipped = cf.project_3d_to_pixel( pts )
    assert np.allclose( pix_actual, pix_actual_flipped )
def test_simple_projection():
    """Project points with a raw 3x4 matrix multiply and with CameraModel;
    the two pixel results must agree."""
    # get some 3D points
    pts_3d = _build_points_3d()
    if DRAW:
        fig = plt.figure(figsize=(8,12))
        ax1 = fig.add_subplot(3,1,1, projection='3d')
        ax1.scatter( pts_3d[:,0], pts_3d[:,1], pts_3d[:,2])
        ax1.set_xlabel('X')
        ax1.set_ylabel('Y')
        ax1.set_zlabel('Z')
    # build a camera calibration matrix
    focal_length = 1200
    width, height = 640,480
    R = np.eye(3) # look at +Z
    c = np.array( (9.99, 19.99, 20) )
    M = make_M( focal_length, width, height, R, c)['M']
    # now, project these 3D points into our image plane
    pts_3d_H = np.vstack( (pts_3d.T, np.ones( (1,len(pts_3d))))) # make homog.
    undist_rst_simple = np.dot(M, pts_3d_H) # multiply
    # Perspective divide: normalize by the third homogeneous coordinate.
    undist_simple = undist_rst_simple[:2,:]/undist_rst_simple[2,:] # project
    if DRAW:
        ax2 = fig.add_subplot(3,1,2)
        ax2.plot( undist_simple[0,:], undist_simple[1,:], 'b.')
        ax2.set_xlim(0,width)
        ax2.set_ylim(height,0)
        ax2.set_title('matrix multiply')
    # build a camera model from our M and project onto image plane
    cam = CameraModel.load_camera_from_M( M, width=width, height=height )
    undist_full = cam.project_3d_to_pixel(pts_3d).T
    if DRAW:
        plot_camera( ax1, cam, scale=10, axes_size=5.0 )
        sz = 20
        x = 5
        y = 8
        z = 19
        ax1.auto_scale_xyz( [x,x+sz], [y,y+sz], [z,z+sz] )
        ax3 = fig.add_subplot(3,1,3)
        ax3.plot( undist_full[0,:], undist_full[1,:], 'b.')
        ax3.set_xlim(0,width)
        ax3.set_ylim(height,0)
        ax3.set_title('pymvg')
    if DRAW:
        plt.show()
    assert np.allclose( undist_full, undist_simple )
if __name__=='__main__':
test_simple_projection()
test_lookat()
| 4,580 | 0 | 92 |
a9795bb69cedbf18f80dc8b60dec3a74eb7e7217 | 23 | py | Python | colored_graph/__init__.py | SyrianSpock/colored-graph | dd182bcff8f09b7e38b73142c713abdc0e276919 | [
"MIT"
] | null | null | null | colored_graph/__init__.py | SyrianSpock/colored-graph | dd182bcff8f09b7e38b73142c713abdc0e276919 | [
"MIT"
] | null | null | null | colored_graph/__init__.py | SyrianSpock/colored-graph | dd182bcff8f09b7e38b73142c713abdc0e276919 | [
"MIT"
] | null | null | null | name = "colored_graph"
| 11.5 | 22 | 0.73913 | name = "colored_graph"
| 0 | 0 | 0 |
6f94639bb7da81753f9a1947683bcad48ae1179f | 7,143 | py | Python | Connect4/bot.py | iridia-ulb/AI-book | 965c6e217a8d2371c64a7e01e7b9145302bcf40f | [
"MIT"
] | 2 | 2021-12-24T21:08:46.000Z | 2022-03-16T20:30:14.000Z | Connect4/bot.py | iridia-ulb/AI-book | 965c6e217a8d2371c64a7e01e7b9145302bcf40f | [
"MIT"
] | null | null | null | Connect4/bot.py | iridia-ulb/AI-book | 965c6e217a8d2371c64a7e01e7b9145302bcf40f | [
"MIT"
] | null | null | null | import random
import math
from common import (
ROW_COUNT,
COLUMN_COUNT,
MINIMAX,
MONTE_CARLO,
RANDOM,
RANDOM_IMPR,
Observer,
)
YELLOW_PLAYER = 1
RED_PLAYER = -1
PLAYERS = {1: "Yellow", -1: "Red"}
class Bot(Observer):
    """
    This class handles the different bots that were used.
    It includes a Random Bot, an Improved Random Bot, the MCTS bot,
    and the MiniMax bot.
    """
    def __init__(
        self, game, bot_type=None, depth=None, iteration=None, pruning=True
    ):
        """
        Constructor of the Bot class.
        :param game: corresponding Connect4Game instance
        :param bot_type: specifies the bot (MCTS, MiniMax, Random, ...)
        :param depth: depth used in the Minimax algorithm if the Minimax bot is used
        :param iteration: number of iterations used in the MCTS algorithm in case the MCTS bot is used
        :param pruning: boolean used for the pruning in the Minimax algorithm if the Minimax bot is used
        """
        self._game = game
        # Bot type determines how the bot picks his moves
        self._type = bot_type
        if self._type == MINIMAX:
            self._depth = depth
            self._pruning = pruning
        elif self._type == MONTE_CARLO:
            self._iteration = iteration
    def make_move(self):
        """
        Picks the column in which the bot should place the next disc.
        The considered moving options depend on the bot type.
        :return: the column number where the bot should play the next move
        """
        # NOTE(review): self.minimax and self.monte_carlo_tree_search are
        # called below but are not defined in this class as shown -
        # presumably implemented elsewhere; confirm before relying on
        # the MINIMAX / MONTE_CARLO branches.
        # print(PLAYERS[self._game._turn] + " is about to play :")
        column = None
        # In case the bot type is RANDOM, the bot checks for winning moves, and if there aren't,
        # then picks a valid random move.
        if self._type == RANDOM:
            win_col = self.get_winning_move()
            if win_col is not None:
                column = win_col
            else:
                column = self.get_random_move()
        # In case the bot type is RANDOM IMPROVED, the bot checks for winning moves, and if there aren't,
        # then checks if there is any move that blocks a direct winning move for the opponent.
        # If there is no such move, it picks a valid random move.
        elif self._type == RANDOM_IMPR:
            win_col = self.get_winning_move()
            if win_col is not None:
                # print("Winning column :", win_col)
                column = win_col
            else:
                def_move = self.get_defensive_move()
                if def_move is not None:
                    # print("Defensive column :", def_move)
                    column = def_move
                else:
                    column = self.get_random_move()
                    # print("Random move", column)
        elif self._type == MINIMAX:
            column, minimax_score = self.minimax(
                self._game._board,
                self._depth,
                -math.inf,
                math.inf,
                True,
                self._pruning,
            )
            # print(column)
        elif self._type == MONTE_CARLO:
            o = Node(self._game.copy_state())
            column = self.monte_carlo_tree_search(self._iteration, o, 2.0)
        else:
            column = 0
        # print("-------------------------")
        self._game.place(column)
    def get_winning_move(self):
        """
        Checks whether there is a winning column available for the next
        move of the bot.
        :return: winning column
        """
        column = None
        for c_win in range(self._game._cols):
            for r in range(self._game._rows):
                # Only the lowest empty cell of each column is playable.
                if self._game._board[c_win][r] == 0:
                    self._game._board[c_win][r] = self._game._turn
                    is_winner = self._game.check_win((c_win, r))
                    # Undo the simulated move before continuing.
                    self._game._board[c_win][r] = 0
                    if is_winner:
                        column = c_win
                        return column
                    break
        return column
    def get_valid_locations(self, board):
        """
        Returns all the valid columns where the player can play, aka the columns
        that are not full
        :param board: actual state of the game, board of the game
        :return: list of all valid column indices
        """
        free_cols = []
        for i in range(COLUMN_COUNT):
            if board[i][ROW_COUNT - 1] == 0:
                free_cols.append(i)
        # print()
        # NOTE(review): returns None (not []) when the board is full;
        # get_random_move would then raise on random.choice(None) - confirm
        # this is unreachable in practice (game ends before a full board).
        if len(free_cols) == 0:
            return None
        return free_cols
    def get_random_move(self):
        """
        Picks a valid random column where the bot can play his next move.
        :return: valid random column
        """
        free_cols = self.get_valid_locations(self._game._board)
        column = random.choice(free_cols)
        return column
    def get_defensive_move(self):
        """
        Checks whether the bot could play a move that blocks a direct winning
        move from the opponent.
        :return: column to be played to avoid losing immediately
        """
        column = None
        for c_win in range(self._game._cols):
            for r in range(self._game._rows):
                if self._game._board[c_win][r] == 0:
                    # Simulate the opponent (-1 * turn) playing this cell.
                    self._game._board[c_win][r] = -1 * self._game._turn
                    is_winner = self._game.check_win((c_win, r))
                    self._game._board[c_win][r] = 0
                    if is_winner:
                        column = c_win
                        return column
                    break
        return column
class Node:
    """
    This class is used to represent nodes of the tree of boards used during
    Monte-Carlo Tree Search.
    """

    def __init__(self, state, parent=None):
        """
        Create a tree node wrapping one game state.

        BUGFIX: this constructor was missing, yet add_child/update/
        fully_explored read self.children, self.reward, self.visits and
        self.state, and make_move constructs Node(game.copy_state()) -
        instances were unusable without it.

        :param state: game state (Connect4Game instance) held by this node
        :param parent: parent Node, or None for the root of the tree
        """
        # Start at 1 visit so UCT-style scores never divide by zero.
        self.visits = 1
        self.reward = 0.0
        self.state = state
        self.children = []
        self.children_moves = []
        self.parent = parent

    def add_child(self, child_state, move):
        """
        Add a child to the current node.
        :param child_state: state of the child to add
        :param move: move to do to get to the newly added child
        """
        child = Node(child_state, parent=self)
        self.children.append(child)
        self.children_moves.append(move)

    def update(self, reward):
        """
        Update the node's reward (indicates how good a certain node is
        according to the MCTS algorithm)
        :param reward: reward to be added to the node
        """
        self.reward += reward
        self.visits += 1

    def fully_explored(self):
        """
        Checks if the node is fully explored (which means we can not add
        any more children to this node)
        :return: True of False depending on if it is fully explored or not
        """
        if len(self.children) == len(self.state.get_valid_locations()):
            return True
        return False
| 32.766055 | 105 | 0.557749 | import random
import math
from common import (
ROW_COUNT,
COLUMN_COUNT,
MINIMAX,
MONTE_CARLO,
RANDOM,
RANDOM_IMPR,
Observer,
)
YELLOW_PLAYER = 1
RED_PLAYER = -1
PLAYERS = {1: "Yellow", -1: "Red"}
class Bot(Observer):
    """
    This class handles the different bots that were used.
    It includes a Random Bot, an Improved Random Bot, the MCTS bot,
    and the MiniMax bot.
    """
    def __init__(
        self, game, bot_type=None, depth=None, iteration=None, pruning=True
    ):
        """
        Constructor of the Bot class.
        :param game: corresponding Connect4Game instance
        :param bot_type: specifies the bot (MCTS, MiniMax, Random, ...)
        :param depth: depth used in the Minimax algorithm if the Minimax bot is used
        :param iteration: number of iterations used in the MCTS algorithm in case the MCTS bot is used
        :param pruning: boolean used for the pruning in the Minimax algorithm if the Minimax bot is used
        """
        self._game = game
        # Bot type determines how the bot picks his moves
        self._type = bot_type
        if self._type == MINIMAX:
            self._depth = depth
            self._pruning = pruning
        elif self._type == MONTE_CARLO:
            self._iteration = iteration
    def __repr__(self):
        """Return the bot's type string."""
        return self._type
    def update(self, obj, event, *argv):
        """Observer hook: called on game events; currently just prints the subject."""
        print(obj)
    def make_move(self):
        """
        Picks the column in which the bot should place the next disc.
        The considered moving options depend on the bot type.
        :return: the column number where the bot should play the next move
        """
        # NOTE(review): self.minimax and self.monte_carlo_tree_search are
        # called below but are not defined in this class as shown -
        # presumably implemented elsewhere; confirm before relying on
        # the MINIMAX / MONTE_CARLO branches.
        # print(PLAYERS[self._game._turn] + " is about to play :")
        column = None
        # In case the bot type is RANDOM, the bot checks for winning moves, and if there aren't,
        # then picks a valid random move.
        if self._type == RANDOM:
            win_col = self.get_winning_move()
            if win_col is not None:
                column = win_col
            else:
                column = self.get_random_move()
        # In case the bot type is RANDOM IMPROVED, the bot checks for winning moves, and if there aren't,
        # then checks if there is any move that blocks a direct winning move for the opponent.
        # If there is no such move, it picks a valid random move.
        elif self._type == RANDOM_IMPR:
            win_col = self.get_winning_move()
            if win_col is not None:
                # print("Winning column :", win_col)
                column = win_col
            else:
                def_move = self.get_defensive_move()
                if def_move is not None:
                    # print("Defensive column :", def_move)
                    column = def_move
                else:
                    column = self.get_random_move()
                    # print("Random move", column)
        elif self._type == MINIMAX:
            column, minimax_score = self.minimax(
                self._game._board,
                self._depth,
                -math.inf,
                math.inf,
                True,
                self._pruning,
            )
            # print(column)
        elif self._type == MONTE_CARLO:
            o = Node(self._game.copy_state())
            column = self.monte_carlo_tree_search(self._iteration, o, 2.0)
        else:
            column = 0
        # print("-------------------------")
        self._game.place(column)
    def get_winning_move(self):
        """
        Checks whether there is a winning column available for the next
        move of the bot.
        :return: winning column
        """
        column = None
        for c_win in range(self._game._cols):
            for r in range(self._game._rows):
                # Only the lowest empty cell of each column is playable.
                if self._game._board[c_win][r] == 0:
                    self._game._board[c_win][r] = self._game._turn
                    is_winner = self._game.check_win((c_win, r))
                    # Undo the simulated move before continuing.
                    self._game._board[c_win][r] = 0
                    if is_winner:
                        column = c_win
                        return column
                    break
        return column
    def get_valid_locations(self, board):
        """
        Returns all the valid columns where the player can play, aka the columns
        that are not full
        :param board: actual state of the game, board of the game
        :return: list of all valid column indices
        """
        free_cols = []
        for i in range(COLUMN_COUNT):
            if board[i][ROW_COUNT - 1] == 0:
                free_cols.append(i)
        # print()
        # NOTE(review): returns None (not []) when the board is full;
        # get_random_move would then raise on random.choice(None) - confirm
        # this is unreachable in practice (game ends before a full board).
        if len(free_cols) == 0:
            return None
        return free_cols
    def get_random_move(self):
        """
        Picks a valid random column where the bot can play his next move.
        :return: valid random column
        """
        free_cols = self.get_valid_locations(self._game._board)
        column = random.choice(free_cols)
        return column
    def get_defensive_move(self):
        """
        Checks whether the bot could play a move that blocks a direct winning
        move from the opponent.
        :return: column to be played to avoid losing immediately
        """
        column = None
        for c_win in range(self._game._cols):
            for r in range(self._game._rows):
                if self._game._board[c_win][r] == 0:
                    # Simulate the opponent (-1 * turn) playing this cell.
                    self._game._board[c_win][r] = -1 * self._game._turn
                    is_winner = self._game.check_win((c_win, r))
                    self._game._board[c_win][r] = 0
                    if is_winner:
                        column = c_win
                        return column
                    break
        return column
class Node:
    """A node in the Monte-Carlo Tree Search game tree.

    Each node wraps one game state plus the statistics (visit count and
    accumulated reward) MCTS uses to rank moves.
    """

    def __init__(self, state, parent=None):
        # Start at 1 visit so UCT-style scores never divide by zero.
        self.visits = 1
        self.reward = 0.0
        self.state = state  # Instance of Connect4Game
        self.children = []
        self.children_moves = []
        self.parent = parent

    def add_child(self, child_state, move):
        """Attach a new child node holding *child_state*, reached via *move*."""
        self.children.append(Node(child_state, parent=self))
        self.children_moves.append(move)

    def update(self, reward):
        """Fold *reward* into this node's statistics and bump its visit count."""
        self.reward += reward
        self.visits += 1

    def fully_explored(self):
        """Return True when every legal move already has a child node."""
        return len(self.children) == len(self.state.get_valid_locations())
| 270 | 0 | 81 |
4db5922ca0eda76dacac4a88ee4dc802601c0259 | 1,501 | py | Python | ML Services/03.VS Code の利用/01.NativeScoreing_Linux.py | MasayukiOzawa/SQLServer-Util | 7dd1f9ab411955b85026c78e6e901ea4c57788f8 | [
"MIT"
] | 64 | 2016-06-15T07:39:40.000Z | 2022-03-22T02:19:50.000Z | ML Services/03.VS Code の利用/01.NativeScoreing_Linux.py | MasayukiOzawa/SQLServer-Util | 7dd1f9ab411955b85026c78e6e901ea4c57788f8 | [
"MIT"
] | 1 | 2016-09-24T17:41:04.000Z | 2016-11-09T01:31:17.000Z | ML Services/03.VS Code の利用/01.NativeScoreing_Linux.py | MasayukiOzawa/SQLServer-Util | 7dd1f9ab411955b85026c78e6e901ea4c57788f8 | [
"MIT"
] | 20 | 2017-03-07T19:20:00.000Z | 2022-03-22T02:34:50.000Z | from revoscalepy import rx_lin_mod, rx_serialize_model, rx_summary
import pandas as pd
import pyodbc
import os
conn_str = 'Driver=SQL Server;Server=<Server Name>;Database=MLDB;Uid=<User Name>;Pwd=<Password>;'
cnxn = pyodbc.connect(conn_str)
cnxn.setencoding("utf-8")
inputsql = 'select "RentalCount", "Year", "Month", "Day", "WeekDay", "Snow", "Holiday", "FWeekDay" from dbo.rental_data where Year < 2015'
rental_train_data = pd.read_sql(inputsql, cnxn)
rental_train_data["Holiday"] = rental_train_data["Holiday"].astype("category")
rental_train_data["Snow"] = rental_train_data["Snow"].astype("category")
rental_train_data["WeekDay"] = rental_train_data["WeekDay"].astype("category")
linmod_model = rx_lin_mod("RentalCount ~ Month + Day + WeekDay + Snow + Holiday", data = rental_train_data)
trained_model = rx_serialize_model(linmod_model, realtime_scoring_only = True)
print(rx_summary("RentalCount ~ Month + Day + WeekDay + Snow + Holiday", rental_train_data))
# Dump learned model to file
with open(r'c:\model\trained_model.pickle', mode='wb') as f:
f.write(trained_model)
# Dump learned model to Table
cursor=cnxn.cursor()
cursor.execute(\
'''
MERGE rental_models AS target
USING (SELECT ? as model_name) AS source
ON(target.model_name = source.model_name)
WHEN MATCHED THEN UPDATE SET native_model = ?
WHEN NOT MATCHED BY TARGET THEN INSERT (model_name, lang, native_model) VALUES(?,?,?);
''', \
("linear_model", trained_model, "linear_model", "Python", trained_model))
cnxn.commit()
| 40.567568 | 138 | 0.756163 | from revoscalepy import rx_lin_mod, rx_serialize_model, rx_summary
# Train a linear model on SQL Server rental data and persist it for native scoring.
import pandas as pd
import pyodbc
import os
# NOTE(review): connection-string placeholders (<Server Name>, <User Name>,
# <Password>) must be filled in before running.
conn_str = 'Driver=SQL Server;Server=<Server Name>;Database=MLDB;Uid=<User Name>;Pwd=<Password>;'
cnxn = pyodbc.connect(conn_str)
cnxn.setencoding("utf-8")
# Training set: all rental rows prior to 2015.
inputsql = 'select "RentalCount", "Year", "Month", "Day", "WeekDay", "Snow", "Holiday", "FWeekDay" from dbo.rental_data where Year < 2015'
rental_train_data = pd.read_sql(inputsql, cnxn)
# Treat the discrete columns as categorical features.
rental_train_data["Holiday"] = rental_train_data["Holiday"].astype("category")
rental_train_data["Snow"] = rental_train_data["Snow"].astype("category")
rental_train_data["WeekDay"] = rental_train_data["WeekDay"].astype("category")
# Fit a linear regression and serialize it for SQL Server native (realtime) scoring.
linmod_model = rx_lin_mod("RentalCount ~ Month + Day + WeekDay + Snow + Holiday", data = rental_train_data)
trained_model = rx_serialize_model(linmod_model, realtime_scoring_only = True)
print(rx_summary("RentalCount ~ Month + Day + WeekDay + Snow + Holiday", rental_train_data))
# Dump learned model to file
with open(r'c:\model\trained_model.pickle', mode='wb') as f:
    f.write(trained_model)
# Dump learned model to Table
cursor=cnxn.cursor()
# Upsert the serialized model into rental_models, keyed on model_name.
cursor.execute(\
'''
MERGE rental_models AS target
USING (SELECT ? as model_name) AS source
ON(target.model_name = source.model_name)
WHEN MATCHED THEN UPDATE SET native_model = ?
WHEN NOT MATCHED BY TARGET THEN INSERT (model_name, lang, native_model) VALUES(?,?,?);
''', \
("linear_model", trained_model, "linear_model", "Python", trained_model))
cnxn.commit()
| 0 | 0 | 0 |
b08b481c6d54dd8f5ce2e1219e53ea74e1b33134 | 1,625 | py | Python | mushroom_rl/utils/features.py | PuzeLiu/mushroom-rl | 99942b425e66b4ddcc26009d7105dde23841e95d | [
"MIT"
] | 344 | 2020-01-10T09:45:02.000Z | 2022-03-30T09:48:28.000Z | mushroom_rl/utils/features.py | AmmarFahmy/mushroom-rl | 2625ee7f64d5613b3b9fba00f0b7a39fece88ca5 | [
"MIT"
] | 44 | 2020-01-23T03:00:56.000Z | 2022-03-25T17:14:22.000Z | mushroom_rl/utils/features.py | AmmarFahmy/mushroom-rl | 2625ee7f64d5613b3b9fba00f0b7a39fece88ca5 | [
"MIT"
] | 93 | 2020-01-10T21:17:58.000Z | 2022-03-31T17:58:52.000Z | import numpy as np
def uniform_grid(n_centers, low, high):
    """
    This function is used to create the parameters of uniformly spaced radial
    basis functions with 25% of overlap. It creates a uniformly spaced grid of
    ``n_centers[i]`` points in each ``ranges[i]``. Also returns a vector
    containing the appropriate scales of the radial basis functions.

    Args:
        n_centers (list): number of centers of each dimension;
        low (np.ndarray): lowest value for each dimension;
        high (np.ndarray): highest value for each dimension.

    Returns:
        The uniformly spaced grid and the scale vector.

    """
    n_features = len(low)
    b = np.zeros(n_features)
    c = list()
    tot_points = 1
    for i, n in enumerate(n_centers):
        start = low[i]
        end = high[i]

        # Scale chosen so neighbouring basis functions overlap appropriately.
        b[i] = (end - start) ** 2 / n ** 3
        m = abs(start - end) / n
        if n == 1:
            c_i = (start + end) / 2.
            c.append(np.array([c_i]))
        else:
            c_i = np.linspace(start - m * .1, end + m * .1, n)
            c.append(c_i)
        tot_points *= n

    n_rows = 1
    n_cols = 0

    # Build the Cartesian product of the per-dimension centers, one
    # dimension (column) at a time.
    grid = np.zeros((tot_points, n_features))
    for discrete_values in c:
        i1 = 0
        dim = len(discrete_values)

        for i in range(dim):
            for r in range(n_rows):
                idx_r = r + i * n_rows
                # FIX(idiom): the inner column index used to be named ``c``,
                # shadowing the list of center arrays iterated above; renamed
                # to ``j`` for clarity (behavior unchanged).
                for j in range(n_cols):
                    grid[idx_r, j] = grid[r, j]
                grid[idx_r, n_cols] = discrete_values[i1]
            i1 += 1

        n_cols += 1
        n_rows *= len(discrete_values)

    return grid, b
| 27.083333 | 78 | 0.547077 | import numpy as np
def uniform_grid(n_centers, low, high):
    """Build a uniformly spaced grid of RBF centers with 25% overlap.

    For each dimension ``i``, ``n_centers[i]`` centers are placed across
    ``[low[i], high[i]]``; the returned scale vector holds the matching
    radial-basis-function widths.

    Args:
        n_centers (list): number of centers per dimension;
        low (np.ndarray): lower bound of each dimension;
        high (np.ndarray): upper bound of each dimension.

    Returns:
        The grid of centers and the vector of scales.

    """
    n_features = len(low)
    b = np.zeros(n_features)
    c = list()
    tot_points = 1
    for i, n in enumerate(n_centers):
        start, end = low[i], high[i]
        b[i] = (end - start) ** 2 / n ** 3
        m = abs(start - end) / n
        if n == 1:
            c.append(np.array([(start + end) / 2.]))
        else:
            c.append(np.linspace(start - m * .1, end + m * .1, n))
        tot_points *= n

    # Expand the Cartesian product of the per-dimension centers column
    # by column into the pre-allocated grid.
    n_rows = 1
    n_cols = 0
    grid = np.zeros((tot_points, n_features))
    for discrete_values in c:
        i1 = 0
        dim = len(discrete_values)
        for i in range(dim):
            for r in range(n_rows):
                idx_r = r + i * n_rows
                for col in range(n_cols):
                    grid[idx_r, col] = grid[r, col]
                grid[idx_r, n_cols] = discrete_values[i1]
            i1 += 1
        n_cols += 1
        n_rows *= dim

    return grid, b
| 0 | 0 | 0 |
c9d22a7bf9b5dc1a732883747a14d59630652d12 | 16,528 | py | Python | inquiry_artifacts.py | saadxan/ExchangeBuddy | 5fa8b67feb8517fcb170b5207af6dbee864921d5 | [
"MIT"
] | null | null | null | inquiry_artifacts.py | saadxan/ExchangeBuddy | 5fa8b67feb8517fcb170b5207af6dbee864921d5 | [
"MIT"
] | null | null | null | inquiry_artifacts.py | saadxan/ExchangeBuddy | 5fa8b67feb8517fcb170b5207af6dbee864921d5 | [
"MIT"
] | null | null | null | from PyQt5.QtChart import *
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
import config
import nav
import yfinance as yf
| 35.391863 | 120 | 0.615138 | from PyQt5.QtChart import *
import PyQt5.QtCore as QtCore
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
import config
import nav
import yfinance as yf
class ReturnButton(QtWidgets.QPushButton):
    """Push button labelled "Return" that navigates back to the home view."""

    def __init__(self):
        super().__init__("Return")
        # Route clicks straight to the navigation helper.
        self.clicked.connect(self.return_home)

    def return_home(self):
        """Delegate to the nav module to go back to the home screen."""
        nav.return_home()
class TickerHeader(QtWidgets.QLabel):
    """Bold 20pt Verdana label displaying the inquired ticker symbol."""

    def __init__(self, ticker):
        super().__init__(ticker)
        self.setFont(QtGui.QFont("Verdana", 20, QtGui.QFont.Bold))
class HelpButton(QtWidgets.QPushButton):
    """'Help' button that pops up a read-only dialog describing the inquiry view."""
    def __init__(self):
        super(HelpButton, self).__init__("Help")
        # Click focus only, so keyboard tab-navigation skips this button.
        self.setFocusPolicy(QtCore.Qt.FocusPolicy.ClickFocus)
        # The help text lives in a read-only QTextEdit reused as a dialog.
        self.help_dialog = QtWidgets.QTextEdit()
        self.help_dialog.setStyleSheet('''QTextEdit{border-image: url(bg.jpg);}''')
        self.help_dialog.setMinimumWidth(700)
        self.help_dialog.setReadOnly(True)
        text = "Inquiry:\t-Use slide to manipulate chart to different periods (1w, 1m, ytd, 1y, a-t) real time.\n"
        text += "\t-Use knob to change axis & line dimension to different parameters (open, volume, close) real time.\n"
        text += "\t-Use button to toggle candle-lights representations for days (allowed for all periods except a-t).\n"
        text += "\t-Hover over candle-light w/ mouse for expressions (the appropriate Date, Open, Close, High, Low).\n"
        text += "\t-Calculations on stock (Low-High, Avg.Price, Avg.Volume, RSI, Dividends) will modify accordingly.\n"
        text += "\t-Use mouse wheel (up/down) to zoom (in/out) respectively & right-click on graph to reset the zoom.\n"
        text += "\t-Favorite/Unfavorite button can be clicked to add/remove the stock from user's favorite list.\n"
        self.help_dialog.setText(text)
        self.clicked.connect(self.show_help_dialog)
    def show_help_dialog(self):
        """Display the pre-built help dialog."""
        self.help_dialog.show()
class NotesButton(QtWidgets.QPushButton):
    """'Notes' button opening a per-ticker free-text editor persisted in config.notes."""
    def __init__(self, ticker):
        super(NotesButton, self).__init__("Notes")
        self.ticker = ticker
        self.notes_editor = QtWidgets.QTextEdit()
        self.notes_editor.setStyleSheet('''QTextEdit{border-image: url(bg.jpg);}''')
        # Saving is triggered when the editor window is closed.
        self.notes_editor.closeEvent = self.save_notes_action
        self.clicked.connect(self.open_notes_action)
    def open_notes_action(self):
        """Show the editor, pre-filled with any previously saved notes."""
        if self.ticker in config.notes.keys():
            self.notes_editor.setText(config.notes[self.ticker])
        self.notes_editor.show()
    def save_notes_action(self, a0: QtGui.QCloseEvent):
        """Persist non-empty notes for this ticker and refresh the home view."""
        notes = self.notes_editor.toPlainText()
        # NOTE(review): clearing the editor does not delete a previously
        # saved note - confirm whether that is intended.
        if notes != "":
            config.notes[self.ticker] = notes
        nav.refresh_home()
class FavoriteButton(QtWidgets.QPushButton):
    """Checkable button that adds/removes the ticker from config.fav."""
    def __init__(self, ticker):
        super(FavoriteButton, self).__init__()
        self.ticker = ticker
        self.setCheckable(True)
        # Initial checked state mirrors whether the ticker is already a favorite.
        if self.ticker in config.fav:
            self.setChecked(True)
            self.setText("Unfavorite")
        else:
            self.setChecked(False)
            self.setText("Favorite")
        self.clicked.connect(self.add_remove_favorite)
    def add_remove_favorite(self):
        """Toggle favorite-list membership to match the button's checked state."""
        if not self.isChecked():
            config.fav.remove(self.ticker)
            self.setText("Favorite")
        else:
            # Guard against duplicates in the favorites list.
            if self.ticker not in config.fav:
                config.fav.append(self.ticker)
            self.setText("Unfavorite")
class StockChartView(QChartView):
    """Chart view adding wheel-zoom, right-click zoom-reset, and x-range clamping."""
    def __init__(self, chart):
        super(StockChartView, self).__init__(chart)
    def validate_move(self):
        """Clamp the x-axis back inside the chart's initial range.

        :return: True when the current range was already within bounds
        """
        min_val = self.chart().axisX().min()
        true_min = self.chart().initial_range[0]
        if min_val < true_min:
            self.chart().axisX().setMin(true_min)
            return False
        max_val = self.chart().axisX().max()
        true_max = self.chart().initial_range[1]
        if max_val > true_max:
            self.chart().axisX().setMax(true_max)
            return False
        return True
    def wheelEvent(self, a0: QtGui.QWheelEvent) -> None:
        """Zoom in (wheel up) or out (wheel down) around the cursor position."""
        if a0.angleDelta().y() > 0:
            if self.validate_move() is True:
                self.zoom_action(1.01, a0.pos().x() - self.chart().plotArea().x())
        # NOTE(review): the str check presumably guards against a categorical
        # (string-valued) x-axis where zooming out is unsupported - confirm.
        elif a0.angleDelta().y() < 0 and type(self.chart().axisX().min()) is not str:
            if self.validate_move() is True:
                self.zoom_action(0.99, a0.pos().x() - self.chart().plotArea().x())
    def zoom_action(self, matrix, midpoint):
        """Rescale the plot area by *matrix*, keeping *midpoint* fixed on screen.

        :param matrix: width scale factor (>1 zooms in, <1 zooms out)
        :param midpoint: cursor x offset within the plot area
        """
        plot_area = self.chart().plotArea()
        width = plot_area.width()
        plot_area.setWidth(float(width / matrix))
        mid_matrix = float(midpoint / width)
        left_move_factor = midpoint - (plot_area.width() * mid_matrix)
        plot_area.moveLeft(plot_area.x() + left_move_factor)
        self.chart().zoomIn(plot_area)
    def mousePressEvent(self, event):
        """Right-click (button 2) resets the zoom to the full initial range."""
        if event.button() == 2:
            self.chart().zoomReset()
            self.validate_move()
class StockChart(QChart):
    """Line/candlestick price chart for a single ticker, backed by yfinance history."""

    def __init__(self, ticker, period='7d', axis='Close'):
        """Build the chart for `ticker` over `period`, plotting the `axis` column."""
        super(StockChart, self).__init__()
        self.header = ticker                 # raw ticker symbol used in the title
        self.ticker = yf.Ticker(ticker)      # yfinance handle for history queries
        self.period = period                 # yfinance period string ('7d', '1y', 'max', ...)
        self.axis = axis                     # history column plotted ('Close', 'Open' or 'Volume')
        self.candle_status = False           # True while the candlestick view is active
        self.initial_range = None            # (min, max) of the x axis right after creation
        self.entry_amount = 0                # number of data points currently plotted
        self.create_chart(period)
        self.legend().hide()
        self.setTheme(QChart.ChartThemeBlueCerulean)
        self.setMinimumHeight(375)
        self.setTitle("{:s} chart of {:s}".format(period.upper(), ticker))
        self.axisX().setLabelsFont(QtGui.QFont("Verdana", 10))
        self.axisY().setLabelsFont(QtGui.QFont("Verdana", 10))

    def create_chart(self, period):
        """Fetch history for `period` and plot it as a line series with date/value axes."""
        stock_history = self.ticker.history(period=period)[self.axis]
        prices = stock_history.tolist()
        dates = stock_history.index.tolist()
        series = QLineSeries()
        for date, price in zip(dates, prices):
            # +86400 s shifts each entry one day forward; *1000 converts s -> ms for Qt.
            series.append((date.timestamp() + 86400) * 1000, price)
        self.entry_amount = len(series)
        x_date_axis = QDateTimeAxis()
        x_date_axis.setFormat("MM/dd/yyyy")
        x_date_axis.setLabelsAngle(-45)
        # Cap the number of tick labels at 16 to keep the axis readable.
        if len(series) < 16:
            x_date_axis.setTickCount(len(series))
        else:
            x_date_axis.setTickCount(16)
        y_value_axis = QValueAxis()
        if self.axis != 'Volume':
            y_value_axis.setLabelFormat("$%.2f")
        else:
            y_value_axis.setLabelFormat("%.0f")
        self.addSeries(series)
        self.setAxisX(x_date_axis, series)
        self.setAxisY(y_value_axis, series)
        self.initial_range = (self.axisX().min(), self.axisX().max())
        if period == 'max':
            # Candlesticks are never kept for the 'max' period.
            self.candle_status = False
        if self.candle_status:
            self.toggle_candle_series(True)
        self.axisX().setLabelsFont(QtGui.QFont("Verdana", 10))
        self.axisY().setLabelsFont(QtGui.QFont("Verdana", 10))

    def update_chart(self, period, axis):
        """Drop the current series/axes and rebuild the chart for `period`/`axis`."""
        self.period = period
        self.axis = axis
        self.setTitle("{:s} chart of {:s}".format(period.upper(), self.header))
        self.removeAllSeries()
        self.removeAxis(self.axisX())
        self.removeAxis(self.axisY())
        self.create_chart(period)

    def toggle_candle_series(self, status):
        """Switch between candlestick view (status=True) and the plain line chart."""
        if status is True:
            self.candle_status = status
            entries = self.ticker.history(self.period)
            dates = []
            series = CandleStickDay()
            for i in range(len(entries)):
                candle_set = QCandlestickSet()
                candle_set.setLow(entries['Low'][i])
                candle_set.setHigh(entries['High'][i])
                candle_set.setOpen(entries['Open'][i])
                candle_set.setClose(entries['Close'][i])
                candle_set.setTimestamp((entries.index[i].timestamp()))
                series.append(candle_set)
                off = 86400  # one day in seconds; aligns labels with the shifted line chart
                date = QtCore.QDateTime.fromSecsSinceEpoch((entries.index[i].timestamp() + off)).toString("MM/dd/yyyy")
                dates.append(CandleDayString(date))
            self.entry_amount = len(dates)
            x_bar_axis = QBarCategoryAxis()
            x_bar_axis.setCategories(dates)
            x_bar_axis.setGridLineVisible(False)
            if self.entry_amount < 30:
                x_bar_axis.setLabelsAngle(-45)
            else:
                x_bar_axis.setLabelsAngle(-90)
            y_value_axis = QValueAxis(series)
            if self.axis != 'Volume':
                y_value_axis.setLabelFormat("$%.2f")
            else:
                y_value_axis.setLabelFormat("%.0f")
            self.addSeries(series)
            self.setAxisX(x_bar_axis, series)
            self.setAxisY(y_value_axis, series)
            # NOTE(review): these removeAxis calls right after setAxisX/setAxisY look
            # intended to detach the *previous* line-chart axes still attached to the
            # chart; confirm which axes axisY()/axisX() resolve to at this point.
            self.removeAxis(self.axisY())
            self.removeAxis(self.axisX())
            self.initial_range = (self.axisX().min(), self.axisX().max())
            self.axisX().setLabelsFont(QtGui.QFont("Verdana", 10))
            self.axisY().setLabelsFont(QtGui.QFont("Verdana", 10))
        else:
            self.candle_status = status
            # Rebuilding with candle_status already False restores the line chart.
            self.update_chart(self.period, self.axis)
class CandleDayString(str):
    """A str whose ordering compares the text as an MM/dd/yyyy date.

    Used for QBarCategoryAxis categories so range comparisons are
    chronological rather than lexicographic.  Note that overriding
    __eq__ leaves instances unhashable (Python clears __hash__), which
    matches the original behaviour.
    """

    def __new__(cls, *args, **kwargs):
        return str.__new__(cls, *args, **kwargs)

    def __eq__(self, x: str) -> bool:
        return QtCore.QDateTime.fromString(self, "MM/dd/yyyy") == QtCore.QDateTime.fromString(str(x), "MM/dd/yyyy")

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, x: str) -> bool:
        return QtCore.QDateTime.fromString(self, "MM/dd/yyyy") < QtCore.QDateTime.fromString(str(x), "MM/dd/yyyy")

    def __gt__(self, x: str) -> bool:
        # Bug fix: the original returned `not self.__lt__(x)`, which is ">=" —
        # it reported True for equal dates.  Strict greater-than must also
        # exclude equality.
        return not (self.__lt__(x) or self.__eq__(x))

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        # Unchanged in value: (not <) is exactly >= for a total order on dates.
        return self.__gt__(other) or self.__eq__(other)
class CandleStickDay(QCandlestickSeries):
    """Candlestick series that shows an OHLC tooltip on its chart while hovered."""

    def __init__(self):
        super().__init__()
        self.setIncreasingColor(QtGui.QColor(0, 200, 0))
        self.setDecreasingColor(QtGui.QColor(200, 0, 0))
        # Chart widget currently on screen; target for the hover tooltip.
        self.parent_chart = get_this('chart')
        self.hovered.connect(self.action)

    def action(self, hovered, cs):
        """Show an OHLC tooltip for candlestick set `cs`, or clear it on hover-out."""
        if hovered is not True:
            self.parent_chart.setToolTip("")
            return
        date = QtCore.QDateTime.fromMSecsSinceEpoch((cs.timestamp() + 86400) * 1000).toString("MM/dd/yyyy")
        tool = "{:s}:\nOpen:${:.2f}\nClose:${:.2f}".format(date, cs.open(), cs.close())
        tool += "\nLow:${:.2f}\nHigh:${:.2f}".format(cs.low(), cs.high())
        self.parent_chart.setToolTip(tool)
class InfoPiece(QtWidgets.QTableWidget):
    """Small statistics table (close, range, RSI, volume, ...) for one ticker/period."""

    def __init__(self, ticker, period='7d'):
        super(InfoPiece, self).__init__()
        self.setSizeAdjustPolicy(QtWidgets.QTableWidget.AdjustToContentsOnFirstShow)
        self.setStyleSheet('''InfoPiece{background-color: lightsteelblue;}
        InfoPiece QTableCornerButton::section{background-color: lightsteelblue;}''')
        self.horizontalHeader().setStyleSheet("background-color: lightsteelblue;")
        self.verticalHeader().setStyleSheet("background-color: lightsteelblue;")
        self.setShowGrid(False)
        self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.ticker_symbol = ticker        # raw symbol, used in the column header
        self.ticker = yf.Ticker(ticker)    # yfinance handle for history queries
        self.period = period
        self.info_table = []               # (label, formatted value) rows; emptied by build_table()
        self.update_info(self.period)

    def update_info(self, period):
        """Recompute all statistics from `period` history and refresh the table."""
        self.period = period
        stock_history = self.ticker.history(period=period)
        last = len(stock_history)
        # assumes the history has at least two rows — TODO confirm for 1-day periods
        prev_close_price = stock_history.iloc[last - 2]['Close']
        today_open_price = stock_history.iloc[last - 1]['Open']
        low_price = stock_history['Low'].min()
        high_price = stock_history['High'].max()
        # Midpoint of the mean open and mean close over the whole period.
        avg_price = (stock_history['Open'].mean() + stock_history['Close'].mean()) / 2
        avg_volume = stock_history['Volume'].mean()
        dividends = stock_history['Dividends'].sum()
        dollar_volume = stock_history.iloc[last - 1]['Volume'] * today_open_price
        # Split daily moves (close - open) into gains and losses for the RSI.
        avg_up = []
        avg_down = []
        for entry_close, entry_open in zip(list(stock_history['Close']), list(stock_history['Open'])):
            move = entry_close - entry_open
            if move >= 0:
                avg_up.append(move)
            else:
                avg_down.append(move)
        if len(avg_up) != 0 and len(avg_down) != 0:
            # NOTE(review): RSI here averages over the full fetched period rather
            # than the conventional 14-day smoothed window — confirm intended.
            avg_gain = sum(avg_up) / len(avg_up)
            avg_loss = sum(avg_down) / len(avg_down)
            rsi = 100 - (100 / (1 + (avg_gain / abs(avg_loss))))
        else:
            rsi = 0.00
        self.info_table.append(("Previous Close:", "${:,.2f}".format(prev_close_price)))
        self.info_table.append(("Low - High:", "${:,.2f} - ${:,.2f}".format(low_price, high_price)))
        self.info_table.append(("Open:", "${:,.2f}".format(today_open_price)))
        self.info_table.append(("Average Price:", "${:,.2f}".format(avg_price)))
        self.info_table.append(("Dollar Volume:", "${:,.0f}".format(dollar_volume)))
        self.info_table.append(("Average Volume:", "{:,.0f}".format(avg_volume)))
        self.info_table.append(("RSI({:d}):".format(last), "{:.2f}".format(rsi)))
        self.info_table.append(("Dividends:", "{:.2f}".format(dividends)))
        self.build_table()

    def build_table(self):
        """Render (and drain) info_table into the widget, one labelled row per stat."""
        self.horizontalScrollBar().setDisabled(True)
        self.setRowCount(len(self.info_table))
        self.setColumnCount(1)
        self.setColumnWidth(0, 200)
        self.setMaximumWidth(310)
        self.setMaximumHeight(240)
        self.setHorizontalHeaderLabels(["{:s} Stats".format(self.ticker_symbol)])
        # pop(0) empties info_table so the next update_info() starts from scratch.
        for i in range(self.rowCount()):
            title_value = self.info_table.pop(0)
            self.setVerticalHeaderItem(i, QtWidgets.QTableWidgetItem(title_value[0]))
            self.setItem(i, 0, QtWidgets.QTableWidgetItem(title_value[1]))
class PeriodSlider(QtWidgets.QSlider):
    """Five-position horizontal slider selecting the chart/stats time period."""

    def __init__(self):
        super().__init__(QtCore.Qt.Orientation.Horizontal)
        self.setFixedSize(230, 30)
        self.setRange(0, 4)
        self.setTickInterval(1)
        self.setValue(4)  # default to the shortest period ('7d')
        self.valueChanged.connect(self.change_period)

    def change_period(self):
        """Map the slider position to a yfinance period and refresh chart + stats."""
        period = {0: 'max', 1: 'ytd', 2: '1y', 3: '30d', 4: '7d'}.get(self.value(), '')
        stock_chart = get_this('chart')
        stock_chart.update_chart(period, stock_chart.axis)
        get_this('info').update_info(period)
class AxisDial(QtWidgets.QDial):
    """Three-position dial choosing which history column the chart plots."""

    def __init__(self):
        super().__init__()
        self.setFixedSize(QtCore.QSize(250, 75))
        self.setRange(0, 2)
        self.setNotchesVisible(True)
        self.valueChanged.connect(self.change_axis)

    def change_axis(self):
        """Map the dial position to a column name and re-plot the chart."""
        stock_chart = get_this('chart')
        axis = {0: 'Close', 1: 'Volume', 2: 'Open'}.get(self.value(), '')
        stock_chart.update_chart(stock_chart.period, axis)
class CandlestickToggle(QtWidgets.QPushButton):
    """Checkable button switching the chart between line and candlestick views."""

    def __init__(self):
        super().__init__("Show Candlesticks")
        self.setFixedSize(QtCore.QSize(250, 50))
        self.setCheckable(True)
        self.setChecked(False)
        self.clicked.connect(self.toggle_candles)

    def toggle_candles(self):
        """Apply the checked state to the chart; candles are forced off for 'max'."""
        stock_chart = get_this('chart')
        if stock_chart.period == 'max':
            self.setChecked(False)
        show = self.isChecked()
        stock_chart.toggle_candle_series(show)
        self.setText("Hide Candlesticks" if show else "Show Candlesticks")
def get_this(item='chart'):
    """Return the named widget attribute ('chart' or 'info') from whichever
    stacked-widget page currently carries it (page 1, else page 2).

    Fix: the original default was written ``item='chart' or 'info'`` — that
    expression always evaluates to just ``'chart'``, so the default is now
    spelled directly; callers see identical behaviour.
    """
    if hasattr(config.stk.widget(1), item):
        return getattr(config.stk.widget(1), item)
    return getattr(config.stk.widget(2), item)
| 14,809 | 214 | 1,321 |
ac895a9dfd95f6ee6833018750b52a1c10056528 | 2,206 | py | Python | tryalgo/dinic.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | tryalgo/dinic.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | tryalgo/dinic.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Maximum flow by Dinic
# jill-jênn vie et christoph dürr - 2015-2018
from collections import deque
from sys import setrecursionlimit
from tryalgo.graph import add_reverse_arcs
setrecursionlimit(5010) # necessary for big graphs
# snip{
def dinic(graph, capacity, source, target):
    """Maximum flow by Dinic

    :param graph: directed graph in listlist or listdict format
    :param capacity: in matrix format or same listdict graph
    :param int source: vertex
    :param int target: vertex
    :returns: skew symmetric flow matrix, flow value
    :complexity: :math:`O(|V|^2 |E|)`

    .. note:: mutates `graph` and `capacity` by adding reverse arcs.
    """
    assert source != target
    add_reverse_arcs(graph, capacity)  # every arc gets a 0-capacity residual twin
    Q = deque()
    total = 0
    n = len(graph)
    flow = [[0] * n for u in range(n)]  # flow initially empty
    while True:  # repeat while we can increase
        Q.appendleft(source)
        lev = [None] * n  # build levels, None = inaccessible
        lev[source] = 0   # by BFS
        while Q:
            u = Q.pop()
            for v in graph[u]:
                # explore only arcs with remaining residual capacity
                if lev[v] is None and capacity[u][v] > flow[u][v]:
                    lev[v] = lev[u] + 1
                    Q.appendleft(v)
        if lev[target] is None:  # stop if sink is not reachable
            return flow, total
        # never push more than what could still leave the source
        up_bound = sum(capacity[source][v] for v in graph[source]) - total
        total += _dinic_step(graph, capacity, lev, flow, source, target,
                             up_bound)
def _dinic_step(graph, capacity, lev, flow, u, target, limit):
    """Push as much flow as possible from u to target along level-increasing
    arcs, never exceeding `limit`.  Updates the skew-symmetric `flow` matrix
    in place and returns the amount pushed.
    """
    if limit <= 0:
        return 0
    if u == target:
        return limit
    pushed = 0
    for v in graph[u]:
        residual = capacity[u][v] - flow[u][v]
        # follow only arcs of the level graph that still have residual capacity
        if residual > 0 and lev[v] == lev[u] + 1:
            aug = _dinic_step(graph, capacity, lev, flow, v, target,
                              min(limit, residual))
            flow[u][v] += aug
            flow[v][u] -= aug
            pushed += aug
            limit -= aug
    if pushed == 0:
        lev[u] = None  # dead end: prune u from the level graph
    return pushed
# snip}
| 31.070423 | 76 | 0.560743 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Maximum flow by Dinic
# jill-jênn vie et christoph dürr - 2015-2018
from collections import deque
from sys import setrecursionlimit
from tryalgo.graph import add_reverse_arcs
setrecursionlimit(5010) # necessary for big graphs
# snip{
def dinic(graph, capacity, source, target):
    """Maximum flow by Dinic

    :param graph: directed graph in listlist or listdict format
    :param capacity: in matrix format or same listdict graph
    :param int source: vertex
    :param int target: vertex
    :returns: skew symmetric flow matrix, flow value
    :complexity: :math:`O(|V|^2 |E|)`

    .. note:: mutates `graph` and `capacity` by adding reverse arcs.
    """
    assert source != target
    add_reverse_arcs(graph, capacity)  # every arc gets a 0-capacity residual twin
    Q = deque()
    total = 0
    n = len(graph)
    flow = [[0] * n for u in range(n)]  # flow initially empty
    while True:  # repeat while we can increase
        Q.appendleft(source)
        lev = [None] * n  # build levels, None = inaccessible
        lev[source] = 0   # by BFS
        while Q:
            u = Q.pop()
            for v in graph[u]:
                # explore only arcs with remaining residual capacity
                if lev[v] is None and capacity[u][v] > flow[u][v]:
                    lev[v] = lev[u] + 1
                    Q.appendleft(v)
        if lev[target] is None:  # stop if sink is not reachable
            return flow, total
        # never push more than what could still leave the source
        up_bound = sum(capacity[source][v] for v in graph[source]) - total
        total += _dinic_step(graph, capacity, lev, flow, source, target,
                             up_bound)
def _dinic_step(graph, capacity, lev, flow, u, target, limit):
    """Try to push as much flow as possible from u to target without
    exceeding `limit`; updates `flow` in place and returns the amount pushed.
    """
    if limit <= 0:
        return 0
    if u == target:
        return limit
    val = 0
    for v in graph[u]:
        residual = capacity[u][v] - flow[u][v]
        # follow only level-graph arcs that still have residual capacity
        if lev[v] == lev[u] + 1 and residual > 0:
            z = min(limit, residual)
            aug = _dinic_step(graph, capacity, lev, flow, v, target, z)
            flow[u][v] += aug
            flow[v][u] -= aug
            val += aug
            limit -= aug
    if val == 0:
        lev[u] = None  # remove unreachable node
    return val
# snip}
| 0 | 0 | 0 |
6a518aff98243781610501ac5c19e19219a5d6bf | 17,572 | py | Python | examples/pytorch/text-classification/run_xnli.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 5 | 2020-09-01T09:15:48.000Z | 2020-09-15T03:25:05.000Z | examples/pytorch/text-classification/run_xnli.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 2 | 2022-03-08T04:58:59.000Z | 2022-03-19T03:45:14.000Z | examples/pytorch/text-classification/run_xnli.py | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 | [
"Apache-2.0"
] | 3 | 2020-08-20T04:46:25.000Z | 2020-10-14T08:39:13.000Z | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
Adapted from `examples/text-classification/run_glue.py`"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.20.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Tokenizer truncation/padding length for the premise+hypothesis pair.
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    # When True, ignore cached tokenized datasets and re-run preprocessing.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    # Static padding to max_seq_length; when False, pad dynamically per batch.
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    # Optional caps on split sizes, mainly for debugging/smoke tests.
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    # Remote (ptvsd) debugger endpoint, attached in main() when both are set.
    server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
    server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # XNLI language codes; train_language falls back to `language` when unset.
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    # Overrides for when config/tokenizer live at a different path than the model.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `transformers-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
# Script entry point: run XNLI fine-tuning/evaluation when executed directly.
if __name__ == "__main__":
    main()
| 39.665914 | 119 | 0.66936 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM).
Adapted from `examples/text-classification/run_glue.py`"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.20.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Tokenizer truncation/padding length for the premise+hypothesis pair.
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    # When True, ignore cached tokenized datasets and re-run preprocessing.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    # Static padding to max_seq_length; when False, pad dynamically per batch.
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    # Optional caps on split sizes, mainly for debugging/smoke tests.
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    # Remote (ptvsd) debugger endpoint, attached in main() when both are set.
    server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
    server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # XNLI language codes; train_language falls back to `language` when unset.
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    # Overrides for when config/tokenizer live at a different path than the model.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `transformers-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup distant debugging if needed
if data_args.server_ip and data_args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(data_args.server_ip, data_args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
train_dataset = load_dataset(
"xnli",
model_args.language,
split="train",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
train_dataset = load_dataset(
"xnli",
model_args.train_language,
split="train",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = train_dataset.features["label"].names
if training_args.do_eval:
eval_dataset = load_dataset(
"xnli",
model_args.language,
split="validation",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = eval_dataset.features["label"].names
if training_args.do_predict:
predict_dataset = load_dataset(
"xnli",
model_args.language,
split="test",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
label_list = predict_dataset.features["label"].names
# Labels
num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task="xnli",
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
do_lower_case=model_args.do_lower_case,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
)
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
    def preprocess_function(examples):
        """Tokenize premise/hypothesis pairs for one dataset batch.

        Closure over `tokenizer`, `padding` and `data_args` defined in main().
        """
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
if training_args.do_train:
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if training_args.do_predict:
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Get the metric function
metric = load_metric("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
    """Compute XNLI accuracy from an `EvalPrediction`.

    `p.predictions` may be a tuple (logits first) or a bare logits array.
    """
    logits = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    # argmax over the class dimension turns logits into predicted label ids.
    predicted_ids = np.argmax(logits, axis=1)
    return metric.compute(predictions=predicted_ids, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
predictions = np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
if trainer.is_world_process_zero():
with open(output_predict_file, "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
item = label_list[item]
writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 11,527 | 0 | 23 |
718bfd06695d1397dd6982ff8bd6f08d63a8642e | 2,059 | py | Python | tests/test_vlan.py | nazarii-gnydyn/sonic-swss | 00ea0ab01fe2877c0c8d5aba3d1e57497a48da80 | [
"Apache-2.0"
] | null | null | null | tests/test_vlan.py | nazarii-gnydyn/sonic-swss | 00ea0ab01fe2877c0c8d5aba3d1e57497a48da80 | [
"Apache-2.0"
] | null | null | null | tests/test_vlan.py | nazarii-gnydyn/sonic-swss | 00ea0ab01fe2877c0c8d5aba3d1e57497a48da80 | [
"Apache-2.0"
] | null | null | null | from swsscommon import swsscommon
import time
import re
import json
| 27.824324 | 79 | 0.609519 | from swsscommon import swsscommon
import time
import re
import json
def test_VlanMemberCreation(dvs):
    """Create Vlan2 with Ethernet0 as an untagged member, verify ASIC DB state."""
    config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0)
    asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0)

    # Create the VLAN in the config DB and give orchagent a moment to react.
    vlan_cfg_tbl = swsscommon.Table(config_db, "VLAN", '|')
    vlan_cfg_tbl.set("Vlan2", swsscommon.FieldValuePairs([("vlanid", "2")]))
    time.sleep(1)

    # The ASIC DB should now hold the default VLAN plus the newly created one.
    vlan_asic_tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN")
    vlan_keys = vlan_asic_tbl.getKeys()
    assert len(vlan_keys) == 2

    vlan_oid = None
    for key in vlan_keys:
        if key == dvs.asicdb.default_vlan_id:
            continue
        (status, fvs) = vlan_asic_tbl.get(key)
        assert status == True
        if fvs[0][0] == "SAI_VLAN_ATTR_VLAN_ID":
            assert fvs[0][1] == '2'
            vlan_oid = key
    assert vlan_oid != None

    # Add Ethernet0 to Vlan2 as an untagged member via the config DB.
    member_cfg_tbl = swsscommon.Table(config_db, "VLAN_MEMBER", '|')
    member_cfg_tbl.set(
        "Vlan2|Ethernet0",
        swsscommon.FieldValuePairs([("tagging_mode", "untagged")]))
    time.sleep(1)

    # Build a bridge-port OID -> port OID map from the ASIC DB.
    bridge_port_map = {}
    bridge_tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT")
    for key in bridge_tbl.getKeys():
        (status, fvs) = bridge_tbl.get(key)
        assert status == True
        for name, value in fvs:
            if name == "SAI_BRIDGE_PORT_ATTR_PORT_ID":
                bridge_port_map[key] = value

    # Exactly one VLAN member object must exist, with the expected attributes.
    member_asic_tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER")
    member_keys = member_asic_tbl.getKeys()
    assert len(member_keys) == 1

    (status, fvs) = member_asic_tbl.get(member_keys[0])
    assert status == True
    for name, value in fvs:
        if name == "SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE":
            assert value == "SAI_VLAN_TAGGING_MODE_UNTAGGED"
        elif name == "SAI_VLAN_MEMBER_ATTR_VLAN_ID":
            assert value == vlan_oid
        elif name == "SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID":
            assert dvs.asicdb.portoidmap[bridge_port_map[value]] == "Ethernet0"
        else:
            assert False
| 1,965 | 0 | 26 |
0d6a499f18fa307168dbe254580e20c8d352547c | 2,243 | py | Python | spar_python/data_generation/progress_reporters.py | nathanawmk/SPARTA | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | [
"BSD-2-Clause"
] | 37 | 2017-06-09T13:55:23.000Z | 2022-01-28T12:51:17.000Z | spar_python/data_generation/progress_reporters.py | nathanawmk/SPARTA | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | [
"BSD-2-Clause"
] | null | null | null | spar_python/data_generation/progress_reporters.py | nathanawmk/SPARTA | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | [
"BSD-2-Clause"
] | 5 | 2017-06-09T13:55:26.000Z | 2021-11-11T03:51:56.000Z | # *****************************************************************
# Copyright 2015 MIT Lincoln Laboratory
# Project: SPAR
# Authors: JCH
# Description: Various classes to inform user of progress
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Oct 2012 jch Original file
# *****************************************************************
"""
This module holds various progress-informers: classes which will keep track
of various forms of progress (file-processing, row-generating, etc) and
keep the user appropriately informed of progress.
"""
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..')
sys.path.append(base_dir)
import datetime
| 32.507246 | 75 | 0.544806 | # *****************************************************************
# Copyright 2015 MIT Lincoln Laboratory
# Project: SPAR
# Authors: JCH
# Description: Various classes to inform user of progress
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Oct 2012 jch Original file
# *****************************************************************
"""
This module holds various progress-informers: classes which will keep track
of various forms of progress (file-processing, row-generating, etc) and
keep the user appropriately informed of progress.
"""
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..')
sys.path.append(base_dir)
import datetime
class RowAggregatorProgressReporter(object):
    """Tracks row-generation progress and periodically logs status.

    Logs a progress line (with an estimated time remaining) roughly every
    1000 processed rows, and a final summary when done() is called.
    """

    def __init__(self, logger, rows_expected):
        """Initialize the reporter.

        Args:
            logger: a logging.Logger-like object (only .info() is used).
            rows_expected: total number of rows expected, used for the
                time-remaining estimate.
        """
        self.__logger = logger
        self.__num_created_rows = 0
        self.__notification_rate = 1000
        self.__start_t = datetime.datetime.now()
        self.__num_total_rows = rows_expected
        # Number of notification milestones already reported. Lets add()
        # detect when a batched update crosses one or more milestones.
        self.__milestones_reported = 0

    def add(self, num_rows):
        """Record num_rows more processed rows; log progress if due.

        BUG FIX: the original test (num_so_far % rate == 0) silently
        skipped notifications whenever a batch jumped over a multiple of
        the rate (e.g. add(999) twice never logs). We now compare milestone
        counts, so batched additions still trigger a notification.
        """
        self.__num_created_rows += num_rows
        num_so_far = self.__num_created_rows
        milestones_reached = num_so_far // self.__notification_rate
        if milestones_reached > self.__milestones_reported:
            self.__milestones_reported = milestones_reached
            now_t = datetime.datetime.now()
            elapsed = now_t - self.__start_t
            elapsed_secs = elapsed.total_seconds()
            if elapsed_secs > 0:
                rows_per_sec = float(num_so_far) / elapsed_secs
                seconds_left = \
                    float(self.__num_total_rows - num_so_far) / rows_per_sec
                left_td = datetime.timedelta(0, seconds_left)
            else:
                # Guard against a zero-length elapsed interval (possible on
                # coarse clocks) which would otherwise divide by zero.
                left_td = "unknown"
            self.__logger.info("%d rows processed. "
                               "Estimated time remaining: %s"
                               % (num_so_far, left_td))

    def add_list(self, results_list):
        """Convenience wrapper: record len(results_list) processed rows."""
        self.add(len(results_list))

    def done(self):
        """Log a final summary with the total row count and elapsed time."""
        end_t = datetime.datetime.now()
        elapsed = end_t - self.__start_t
        self.__logger.info("Done. %s rows successfully generated. "
                           "Elapsed time: %s"
                           % (self.__num_created_rows,
                              elapsed))
| 1,217 | 23 | 147 |
9902f0b6820f734ff15a736daf7d31ba2b2b9405 | 10,053 | py | Python | better_storylines/src/models.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | better_storylines/src/models.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | better_storylines/src/models.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for next-sentence prediction task on ROCStories.
"""
import collections
from absl import logging
import gin
import gin.tf
import tensorflow.compat.v2 as tf
gfile = tf.io.gfile
@gin.configurable
class LinearModel(tf.keras.Model):
  """Multi-layer perceptron with embedding matrix at end."""

  def __init__(
      self,
      num_input_sentences=None,
      embedding_matrix=None,
      embedding_dim=None):
    """Creates a small MLP, then multiplies outputs by embedding matrix.

    Either an embedding matrix or an embedding dimension should be specified.
    If the former, predictions are made by multiplying the NN outputs by this
    embedding matrix. If only an embedding dimension is provided, call()
    outputs an embedding, but no predictions.

    Args:
      num_input_sentences: Integer number of input sentences.
      embedding_matrix: Matrix of size [embedding_dim * num_last_ouputs]
      embedding_dim: Matrix of size [embedding_dim * num_last_ouputs]
    """
    super(LinearModel, self).__init__()
    # Exactly one of embedding_matrix / embedding_dim must be given.
    assert (embedding_matrix is None) ^ (embedding_dim is None)
    self._loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True)
    self._num_input_sentences = num_input_sentences
    self.embedding_matrix = embedding_matrix
    if self.embedding_matrix is not None:
      self._embedding_dim = self.embedding_matrix.shape[1]
    else:
      self._embedding_dim = embedding_dim
    x_input, x_output = self._build_network()
    # NOTE: __init__ is deliberately invoked a second time, switching the
    # model into Keras functional-API mode with the freshly built graph.
    super(LinearModel, self).__init__(
        inputs=x_input, outputs=x_output, name='model')

  @gin.configurable('LinearModel.hparams')
  def _build_network(self,
                     relu_layers=(2048, 1024),
                     dropout_amount=0.5,
                     normalize_embeddings=False,
                     final_dropout=True,
                     small_context_loss_weight=0.0,
                     max_num_distractors=-1):
    """Builds the network.

    Args:
      relu_layers: Dimensions of linear+RELU layers to add to MLP. These do not
        need to include the final projection down to embedding_dim.
      dropout_amount: If training, how much dropout to use in each layer.
      normalize_embeddings: If True, normalize sentence embeddings (both
        input and predicted) to mean 0, unit variance.
      final_dropout: If True, adds dropout to the final embedding layer.
      small_context_loss_weight: If >0, in addition to the loss with many
        distractors, add another loss where the only distractors are the
        sentences of the context.
      max_num_distractors: If non-negative, randomly pick a window of this many
        distractors around the true 5th sentence.

    Returns:
      A Keras model.
    """
    self.small_context_loss_weight = small_context_loss_weight
    self._max_num_distractors = max_num_distractors
    # x starts off with dimension [batch_size x num_sentences x emb_size].
    # Convert it to [batch_size x (num_sentences*emb_size)].
    x_input = tf.keras.Input(
        shape=[self._num_input_sentences, self._embedding_dim])
    flattened_shape = [-1, self._num_input_sentences * self._embedding_dim]
    x = tf.reshape(x_input, flattened_shape)
    mlp = tf.keras.Sequential()
    if normalize_embeddings:
      mlp.add(tf.keras.layers.LayerNormalization(axis=1))
    for layer_output_dim in relu_layers:
      mlp.add(
          tf.keras.layers.Dense(layer_output_dim, activation='relu'))
      mlp.add(tf.keras.layers.Dropout(dropout_amount))
    # Final layer bring us back to embedding dimension.
    mlp.add(tf.keras.layers.Dense(self._embedding_dim, activation='linear'))
    if final_dropout:
      mlp.add(tf.keras.layers.Dropout(dropout_amount))
    if normalize_embeddings:
      mlp.add(tf.keras.layers.LayerNormalization(axis=1))
    return x_input, mlp(x)

  def create_metrics(self):
    """Outputs a dictionary containing all the metrics we want to log."""
    metrics = [
        tf.keras.metrics.Mean(name='train_loss'),
        tf.keras.metrics.SparseCategoricalAccuracy(name='train_acc'),
        tf.keras.metrics.Accuracy(name='valid_nolabel_acc'),
        tf.keras.metrics.Accuracy(name='train_subset_acc'),
        tf.keras.metrics.Accuracy(name='valid_spring2016_acc'),
        tf.keras.metrics.Accuracy(name='valid_winter2018_acc')]
    if self.small_context_loss_weight > 0.0:
      metrics.append(tf.keras.metrics.Mean(name='main_loss'))
      metrics.append(tf.keras.metrics.Mean(name='small_context_loss'))
    # Keyed by metric name so callers can look up / log by name.
    metrics = collections.OrderedDict((m.name, m) for m in metrics)
    return metrics
@gin.configurable
class ResidualModel(LinearModel):
  """Residual multi-layer perceptron with embedding matrix at end."""

  @gin.configurable('ResidualModel.hparams')
  def _build_network(self,
                     residual_layer_size=1024,
                     num_residual_layers=2,
                     dropout_amount=0.5,
                     small_context_loss_weight=0.0,
                     max_num_distractors=-1):
    """Builds an MLP with residual connections.

    Args:
      residual_layer_size: Dimension for linear layer to add to MLP.
      num_residual_layers: Number of residual layer.
      dropout_amount: If training, how much dropout to use in each layer.
      small_context_loss_weight: If >0, in addition to the loss with many
        distractors, add another loss where the only distractors are the
        sentences of the context.
      max_num_distractors: The maximum number of distractors provided at each
        train step.

    Returns:
      The input and output tensors for the network, with the input being a
      placeholder variable.
    """
    self.small_context_loss_weight = small_context_loss_weight
    self._max_num_distractors = max_num_distractors
    # x starts off with dimension [batch_size x num_sentences x emb_size].
    # Convert it to [batch_size x (num_sentences*emb_size)].
    x_input = tf.keras.Input(
        shape=[self._num_input_sentences, self._embedding_dim])
    flattened_shape = [-1, self._num_input_sentences * self._embedding_dim]
    x = tf.reshape(x_input, flattened_shape)

    # BUG FIX: the nested `block` helper was missing here, so the loop below
    # raised NameError. Restored: one residual unit of
    # Dense-ReLU -> Dropout -> Dense-ReLU with a skip connection.
    def block(start_x, embedding_size):
      x = tf.keras.layers.Dense(embedding_size, activation='relu')(start_x)
      x = tf.keras.layers.Dropout(dropout_amount)(x)
      x = tf.keras.layers.Dense(embedding_size, activation='relu')(x)
      return x + start_x

    x = tf.keras.layers.LayerNormalization(axis=1)(x)
    # First bring dimension down to desired.
    x = tf.keras.layers.Dense(residual_layer_size)(x)
    # Add specified number of residual layers.
    for _ in range(num_residual_layers):
      x = block(x, residual_layer_size)
    # Go back up to desired dimension.
    x = tf.keras.layers.Dense(self._embedding_dim, activation='linear')(x)
    x = tf.keras.layers.LayerNormalization(axis=1)(x)
    return x_input, x
@gin.configurable(allowlist=['network_class'])
def build_model(num_input_sentences,
                embedding_matrix=None,
                embedding_dim=None,
                network_class=None):
  """Instantiates and returns the configured model object.

  Falls back to the fully connected LinearModel when no network class has
  been configured via gin.
  """
  model_cls = LinearModel if network_class is None else network_class
  return model_cls(num_input_sentences, embedding_matrix, embedding_dim)
| 38.079545 | 79 | 0.693723 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for next-sentence prediction task on ROCStories.
"""
import collections
from absl import logging
import gin
import gin.tf
import tensorflow.compat.v2 as tf
gfile = tf.io.gfile
@gin.configurable
class LinearModel(tf.keras.Model):
  """Multi-layer perceptron with embedding matrix at end."""

  def __init__(
      self,
      num_input_sentences=None,
      embedding_matrix=None,
      embedding_dim=None):
    """Creates a small MLP, then multiplies outputs by embedding matrix.

    Either an embedding matrix or an embedding dimension should be specified.
    If the former, predictions are made by multiplying the NN outputs by this
    embedding matrix. If only an embedding dimension is provided, call()
    outputs an embedding, but no predictions.

    Args:
      num_input_sentences: Integer number of input sentences.
      embedding_matrix: Matrix of size [embedding_dim * num_last_ouputs]
      embedding_dim: Matrix of size [embedding_dim * num_last_ouputs]
    """
    super(LinearModel, self).__init__()
    # Exactly one of embedding_matrix / embedding_dim must be given.
    assert (embedding_matrix is None) ^ (embedding_dim is None)
    self._loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True)
    self._num_input_sentences = num_input_sentences
    self.embedding_matrix = embedding_matrix
    if self.embedding_matrix is not None:
      self._embedding_dim = self.embedding_matrix.shape[1]
    else:
      self._embedding_dim = embedding_dim
    x_input, x_output = self._build_network()
    # NOTE: __init__ is deliberately invoked a second time, switching the
    # model into Keras functional-API mode with the freshly built graph.
    super(LinearModel, self).__init__(
        inputs=x_input, outputs=x_output, name='model')

  @gin.configurable('LinearModel.hparams')
  def _build_network(self,
                     relu_layers=(2048, 1024),
                     dropout_amount=0.5,
                     normalize_embeddings=False,
                     final_dropout=True,
                     small_context_loss_weight=0.0,
                     max_num_distractors=-1):
    """Builds the network.

    Args:
      relu_layers: Dimensions of linear+RELU layers to add to MLP. These do not
        need to include the final projection down to embedding_dim.
      dropout_amount: If training, how much dropout to use in each layer.
      normalize_embeddings: If True, normalize sentence embeddings (both
        input and predicted) to mean 0, unit variance.
      final_dropout: If True, adds dropout to the final embedding layer.
      small_context_loss_weight: If >0, in addition to the loss with many
        distractors, add another loss where the only distractors are the
        sentences of the context.
      max_num_distractors: If non-negative, randomly pick a window of this many
        distractors around the true 5th sentence.

    Returns:
      A Keras model.
    """
    self.small_context_loss_weight = small_context_loss_weight
    self._max_num_distractors = max_num_distractors
    # x starts off with dimension [batch_size x num_sentences x emb_size].
    # Convert it to [batch_size x (num_sentences*emb_size)].
    x_input = tf.keras.Input(
        shape=[self._num_input_sentences, self._embedding_dim])
    flattened_shape = [-1, self._num_input_sentences * self._embedding_dim]
    x = tf.reshape(x_input, flattened_shape)
    mlp = tf.keras.Sequential()
    if normalize_embeddings:
      mlp.add(tf.keras.layers.LayerNormalization(axis=1))
    for layer_output_dim in relu_layers:
      mlp.add(
          tf.keras.layers.Dense(layer_output_dim, activation='relu'))
      mlp.add(tf.keras.layers.Dropout(dropout_amount))
    # Final layer bring us back to embedding dimension.
    mlp.add(tf.keras.layers.Dense(self._embedding_dim, activation='linear'))
    if final_dropout:
      mlp.add(tf.keras.layers.Dropout(dropout_amount))
    if normalize_embeddings:
      mlp.add(tf.keras.layers.LayerNormalization(axis=1))
    return x_input, mlp(x)

  def call(self, x, training=True):
    """Runs the MLP and optionally scores against the embedding matrix.

    Args:
      x: Batch of input sentence embeddings; the functional graph expects
        shape [batch_size, num_input_sentences, embedding_dim].
      training: Whether dropout layers should be active.

    Returns:
      A (scores, embedding) tuple. `scores` is the dot product of the
      predicted embedding against every row of `embedding_matrix`, or None
      when the model was constructed without an embedding matrix.
    """
    embedding = super(LinearModel, self).call(x, training)
    if self.embedding_matrix is not None:
      # Similarity of the predicted embedding to every candidate embedding.
      scores = tf.matmul(
          embedding, self.embedding_matrix, transpose_b=True)
      return scores, embedding
    else:
      return None, embedding

  def compute_loss(self, labels, scores):
    """Sparse categorical cross-entropy, optionally capping distractors.

    When `max_num_distractors` is non-negative and no larger than the number
    of candidates, a random window of that many distractor scores is kept
    per example and the groundtruth score is moved to position 0, so the
    effective label becomes 0.

    Args:
      labels: Integer tensor of groundtruth candidate indices, [batch_size].
      scores: Float tensor of candidate scores, [batch_size, num_candidates].

    Returns:
      The scalar loss.
    """
    if (self._max_num_distractors != -1 and
        self._max_num_distractors <= scores.shape[1]):
      # Truncates the number of distractors and redefines labels and scores.
      # TODO(dei): Add gin config arg for choosing random num distractor.s
      # max_num_dist = tf.random.uniform(
      #     [], 1, self.embedding_matrix.shape[0], dtype=tf.int32)
      max_num_dist = self._max_num_distractors
      def slice_to_max_num_distractors_fn(inputs):
        """Reduces the number of distractors to the max number."""
        label_for_ex, scores_for_ex = inputs
        # Remove the groundtruth score before sampling the window.
        scores_nocorrect = tf.concat(
            [scores_for_ex[0:label_for_ex],
             scores_for_ex[(label_for_ex+1):]],
            axis=0)
        random_start_index = tf.random.uniform(
            shape=[],
            minval=0,
            maxval=scores_for_ex.shape[0]-max_num_dist,
            dtype=tf.int32)
        new_scores = scores_nocorrect[
            random_start_index:random_start_index+max_num_dist]
        # Put the groundtruth embedding in position 0 to make labels easy.
        new_scores = tf.concat(
            [tf.expand_dims(scores_for_ex[label_for_ex], 0), new_scores],
            axis=0)
        return new_scores
      # Truncates the number of distractors being scores to the max number.
      scores = tf.map_fn(slice_to_max_num_distractors_fn,
                         [labels, scores], dtype=tf.float32)
      logging.warning('HERE: scores=%s, labels%s',
                      str(scores.shape), str(labels.shape))
      # Since we moved the correct embedding to position 0.
      labels = tf.zeros_like(labels)
    main_loss = self._loss_object(labels, scores)
    return main_loss

  def create_metrics(self):
    """Outputs a dictionary containing all the metrics we want to log."""
    metrics = [
        tf.keras.metrics.Mean(name='train_loss'),
        tf.keras.metrics.SparseCategoricalAccuracy(name='train_acc'),
        tf.keras.metrics.Accuracy(name='valid_nolabel_acc'),
        tf.keras.metrics.Accuracy(name='train_subset_acc'),
        tf.keras.metrics.Accuracy(name='valid_spring2016_acc'),
        tf.keras.metrics.Accuracy(name='valid_winter2018_acc')]
    if self.small_context_loss_weight > 0.0:
      metrics.append(tf.keras.metrics.Mean(name='main_loss'))
      metrics.append(tf.keras.metrics.Mean(name='small_context_loss'))
    # Keyed by metric name so callers can look up / log by name.
    metrics = collections.OrderedDict((m.name, m) for m in metrics)
    return metrics
@gin.configurable
class ResidualModel(LinearModel):
  """Residual multi-layer perceptron with embedding matrix at end."""

  @gin.configurable('ResidualModel.hparams')
  def _build_network(self,
                     residual_layer_size=1024,
                     num_residual_layers=2,
                     dropout_amount=0.5,
                     small_context_loss_weight=0.0,
                     max_num_distractors=-1):
    """Builds an MLP with residual connections.

    Args:
      residual_layer_size: Dimension for linear layer to add to MLP.
      num_residual_layers: Number of residual layer.
      dropout_amount: If training, how much dropout to use in each layer.
      small_context_loss_weight: If >0, in addition to the loss with many
        distractors, add another loss where the only distractors are the
        sentences of the context.
      max_num_distractors: The maximum number of distractors provided at each
        train step.

    Returns:
      The input and output tensors for the network, with the input being a
      placeholder variable.
    """
    self.small_context_loss_weight = small_context_loss_weight
    self._max_num_distractors = max_num_distractors
    # x starts off with dimension [batch_size x num_sentences x emb_size].
    # Convert it to [batch_size x (num_sentences*emb_size)].
    x_input = tf.keras.Input(
        shape=[self._num_input_sentences, self._embedding_dim])
    flattened_shape = [-1, self._num_input_sentences * self._embedding_dim]
    x = tf.reshape(x_input, flattened_shape)
    def block(start_x, embedding_size):
      """One residual unit: Dense-ReLU -> Dropout -> Dense-ReLU + skip."""
      x = tf.keras.layers.Dense(embedding_size, activation='relu')(start_x)
      x = tf.keras.layers.Dropout(dropout_amount)(x)
      x = tf.keras.layers.Dense(embedding_size, activation='relu')(x)
      return x + start_x
    x = tf.keras.layers.LayerNormalization(axis=1)(x)
    # First bring dimension down to desired.
    x = tf.keras.layers.Dense(residual_layer_size)(x)
    # Add specified number of residual layers.
    for _ in range(num_residual_layers):
      x = block(x, residual_layer_size)
    # Go back up to desired dimension.
    x = tf.keras.layers.Dense(self._embedding_dim, activation='linear')(x)
    x = tf.keras.layers.LayerNormalization(axis=1)(x)
    return x_input, x
@gin.configurable(allowlist=['network_class'])
def build_model(num_input_sentences,
                embedding_matrix=None,
                embedding_dim=None,
                network_class=None):
  """Instantiates and returns the configured model object.

  Falls back to the fully connected LinearModel when no network class has
  been configured via gin.
  """
  model_cls = LinearModel if network_class is None else network_class
  return model_cls(num_input_sentences, embedding_matrix, embedding_dim)
| 2,236 | 0 | 77 |
a31127b72b7d21ee24259615b6d11002355968cb | 1,513 | py | Python | Structural/composite.py | TheVikingGent/DesignPatterns4Python | ace9f577d9700fe290d80822230acb8e87833bc2 | [
"MIT"
] | null | null | null | Structural/composite.py | TheVikingGent/DesignPatterns4Python | ace9f577d9700fe290d80822230acb8e87833bc2 | [
"MIT"
] | null | null | null | Structural/composite.py | TheVikingGent/DesignPatterns4Python | ace9f577d9700fe290d80822230acb8e87833bc2 | [
"MIT"
] | null | null | null | import abc
# Good old composite pattern
# This is used when we want to create a hierachy of instances that contain other instances,
# but we want to operate on all instances somewhat equally
# Here the composite instances can contain other composites or leafs
# All implement the operation method, where the composite will be sure to
# call the same method on all its childred
# Note that some methods are not implemented on Leaf as that does not make sense.
# They throw errors for the sake of safety, but they kinda need to be there
# so that Composites and Leafs can be treated in a similar way
c1 = Composite()
c1.add(Leaf())
c1.add(Leaf())
c2 = Composite()
c2.add(Leaf())
c2.add(c1)
print(c2.operation()) | 26.086207 | 91 | 0.682089 | import abc
# Good old composite pattern
# This is used when we want to create a hierachy of instances that contain other instances,
# but we want to operate on all instances somewhat equally
# Here the composite instances can contain other composites or leafs
# All implement the operation method, where the composite will be sure to
# call the same method on all its childred
# Note that some methods are not implemented on Leaf as that does not make sense.
# They throw errors for the sake of safety, but they kinda need to be there
# so that Composites and Leafs can be treated in a similar way
class Component(object):
    """Abstract base for every node in the composite tree.

    Subclasses override `operation`; container subclasses additionally
    override the child-management methods. Each default raises
    NotImplementedError on purpose (rather than silently no-op-ing) so
    composites and leaves can be handled through one interface safely.
    """

    def operation(self):
        # Must be provided by every concrete node.
        raise NotImplementedError

    def get_child(self, index):
        # Only containers have children.
        raise NotImplementedError

    def add(self, child):
        raise NotImplementedError

    def remove(self, child):
        raise NotImplementedError
class Composite(Component):
    """A component that holds child components and delegates to them.

    `operation` renders all children between '|' delimiters, separated by
    commas, so nested composites produce nested delimited groups.
    """

    def __init__(self):
        self._children = []

    def operation(self):
        """Return '|child,child,...|' with each child rendered recursively."""
        result = '|'
        result += ','.join([child.operation() for child in self._children])
        result += '|'
        return result

    def add(self, child):
        """Append a child component (leaf or composite)."""
        self._children.append(child)

    def remove(self, child):
        """Remove the first occurrence of child."""
        self._children.remove(child)

    def get_child(self, index):
        """Return the child at index.

        BUG FIX: the original evaluated self._children[index] but discarded
        the value, so the method always returned None.
        """
        return self._children[index]
class Leaf(Component):
    """A terminal node: no children, contributes a fixed token."""

    def operation(self):
        # Every leaf renders identically in the composite's output.
        return 'leaf'
# Demo: build a small tree. c2 is the root holding one Leaf plus the
# nested composite c1, which itself holds two leaves, so
# c2.operation() renders '|leaf,|leaf,leaf||'.
c1 = Composite()
c1.add(Leaf())
c1.add(Leaf())
c2 = Composite()
c2.add(Leaf())
c2.add(c1)
print(c2.operation()) | 444 | 10 | 344 |
31da67c90663432016d6bb3ba1113b4ff12e3c5a | 3,435 | py | Python | tests/request_prep.py | cmd410/genki | cbe1435d2c7423fe56dfc3302c69a4808d95c3c2 | [
"MIT"
] | 2 | 2020-10-02T06:55:48.000Z | 2020-10-02T12:21:20.000Z | tests/request_prep.py | cmd410/genki | cbe1435d2c7423fe56dfc3302c69a4808d95c3c2 | [
"MIT"
] | 3 | 2020-10-15T20:20:58.000Z | 2020-10-15T20:26:22.000Z | tests/request_prep.py | cmd410/genki | cbe1435d2c7423fe56dfc3302c69a4808d95c3c2 | [
"MIT"
] | null | null | null | from unittest import TestCase
from itertools import product
from genki.http.url.parse import parse_url, url_parse_result
from genki.http.request import RequestBuilder
from genki.http.constants import Scheme
from genki.http.url.exceptions import InvalidURL
| 27.926829 | 69 | 0.445997 | from unittest import TestCase
from itertools import product
from genki.http.url.parse import parse_url, url_parse_result
from genki.http.request import RequestBuilder
from genki.http.constants import Scheme
from genki.http.url.exceptions import InvalidURL
def generate_url():
    """Yield (url_string, expected_parse_result) test pairs.

    Walks the cartesian product of scheme/user/password/host/port/path/
    query/fragment variants, building both the URL text and the
    `url_parse_result` that `parse_url` is expected to produce for it.

    NOTE: the loop body deliberately mutates its own loop variables
    (`proto`, `port`) to derive the *expected* values; the statement
    order below is significant.
    """
    protos = ('http', 'https', '')
    domains = (
        'example.com', '[2001:db8::]', '127.0.0.1'
    )
    ports = (8080, 6204, '')
    usernames = ('username', '')
    passwords = ('password', '')
    paths = (
        '/',
        '/some/path'
    )
    queries = ('', '?param=value')
    fragments = ('', 'fragment')
    for proto, user, password, host, port, path, query, fragment in \
            product(protos, usernames, passwords,
                    domains, ports, paths, queries, fragments):
        url = ''
        if proto:
            url = f'{proto}://'
            if not port:
                # Explicit scheme but no explicit port: the scheme's
                # default port becomes part of the URL text (appended
                # below) and of the expected result.
                port = 443 if proto == 'https' else 80
        else:
            # Scheme-less URLs are expected to parse as plain http.
            proto = 'http'
        if user:
            url += f'{user}'
            if password:
                url += f':{password}'
            url += '@'
        url += f'{host}'
        if port:
            url += f':{port}'
        url += f'{path}{query}'
        if fragment:
            url += f'#{fragment}'
        if not port:
            # Scheme-less and port-less: only the *expected* port is
            # defaulted; nothing was appended to the URL text.
            port = 443 if proto == 'https' else 80
        yield url, url_parse_result(
            Scheme(proto),
            host,
            path,
            port,
            user,
            # A password without a username is not representable in a URL.
            password if user else '',
            query[1:],  # strip the leading '?'
            fragment)
class RequestPreparations(TestCase):
    """Tests for URL parsing and HTTP request serialization."""

    def test_url(self):
        """Check that every generated url parses to its expected result."""
        cases = list(generate_url())
        for url, result in cases:
            with self.subTest(url=url):
                r = parse_url(url)
                self.assertEqual(r, result)

    def test_invalid_urls(self):
        """Make sure invalid urls will raise an error
        """
        invalid_cases = [
            'https://',
            '/',
            '',
            'example.com:',
            ':example.com',
            'http://example.com:',
            'http://:example.com'
        ]
        for url in invalid_cases:
            with self.subTest(url=url):
                self.assertRaises(InvalidURL, RequestBuilder, url)

    def test_to_bytes(self):
        """Test that request converts to bytes correctly
        """
        s = 'GET {path} HTTP/1.1\r\n'
        hosts = [
            'example.com',
            'http://example.com',
            'https://example.com',
            'http://example.com:8080'
        ]
        paths = [
            '/',
            '/some/path'
        ]
        for host, path in product(hosts, paths):
            url = host + path
            with self.subTest(url=url):
                req = RequestBuilder(url)
                # Reduce `host` to the bare hostname expected in the
                # Host header: drop the scheme prefix, then the port.
                if '://' in host:
                    host = host[host.find('://') + 3:]
                if ':' in host:
                    host = host[:host.find(':')]
                # NOTE(review): the host= kwarg is unused here — the
                # template only contains {path}; str.format ignores
                # the extra keyword.
                req_body = s.format(host=hosts[0], path=path)
                self.assertEqual(req.to_bytes(), (
                    ''.join(
                        [
                            req_body,
                            f'Host: {host}\r\n',
                            'Connection: close\r\n',
                            '\r\n',
                        ]
                    ).encode()))
| 1,279 | 1,851 | 46 |
0931f806ba3567f0c3f9807d5366398745b76ddd | 4,292 | py | Python | gen/tests/test_adminrouter_tls_conf.py | Chewie/dcos | e2da3c7abf02d258b5b3292338f69dc4d59d34c5 | [
"Apache-2.0"
] | null | null | null | gen/tests/test_adminrouter_tls_conf.py | Chewie/dcos | e2da3c7abf02d258b5b3292338f69dc4d59d34c5 | [
"Apache-2.0"
] | 1 | 2020-02-09T11:37:07.000Z | 2020-02-09T11:37:07.000Z | gen/tests/test_adminrouter_tls_conf.py | Chewie/dcos | e2da3c7abf02d258b5b3292338f69dc4d59d34c5 | [
"Apache-2.0"
] | null | null | null | from textwrap import dedent
from typing import List
import pytest
import gen
from gen.tests.utils import make_arguments, true_false_msg, validate_error
class TestAdminRouterTLSConfig:
    """
    Tests for the Admin Router TLS Config creation.
    """

    def test_default(self):
        """
        By default, the configuration specifies certain TLS settings.

        This test is a sanity check for the configuration template logic
        rather than a particularly useful feature test.
        """
        config_path = '/etc/adminrouter-tls.conf'
        arguments = make_arguments(new_arguments={})
        generated = gen.generate(arguments=arguments)
        package = generated.templates['dcos-config.yaml']['package']
        # Single-element unpack doubles as an assertion that exactly one
        # package item targets the TLS config path.
        [config] = [item for item in package if item['path'] == config_path]
        # Must match the rendered template byte-for-byte.
        expected_configuration = dedent(
            """\
            # Ref: https://github.com/cloudflare/sslconfig/blob/master/conf
            # Modulo ChaCha20 cipher.
            ssl_ciphers EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
            ssl_prefer_server_ciphers on;
            # To manually test which TLS versions are enabled on a node, use
            # `openssl` commands.
            #
            # See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
            # details.
            ssl_protocols TLSv1.1 TLSv1.2;
            """
        )
        assert config['content'] == expected_configuration
class TestToggleTLS1:
    """
    Tests for toggling TLS 1.0.

    To manually test that this is, in fact, a working toggle for TLS 1.0, use
    `openssl` commands.

    See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
    details.
    """

    def supported_ssl_protocols(self, new_config_arguments) -> List[str]:
        """
        This finds a line which looks like the following:
            ssl protocols TLSv1, TLSv1.1;
        in the Admin Router TLS configuration.

        It then returns the listed protocols.

        Args:
            new_config_arguments: Arguments which are added to the 'standard'
                set of arguments before generating configuration files.

        Returns:
            A ``list`` of supported SSL protocols.
        """
        arguments = make_arguments(new_arguments=new_config_arguments)
        generated = gen.generate(arguments=arguments)
        package = generated.templates['dcos-config.yaml']['package']
        config_path = '/etc/adminrouter-tls.conf'
        # Single-element unpacks assert exactly one matching item/line.
        [config] = [item for item in package if item['path'] == config_path]
        [ssl_protocols_line] = [
            line for line in config['content'].split('\n') if
            # We strip whitespace from the beginning of the line as NGINX
            # configuration lines can start with whitespace.
            line.lstrip().startswith('ssl_protocols ')
        ]
        # Drop the trailing ';', then everything after the directive name
        # is the protocol list.
        ssl_protocols_line = ssl_protocols_line.strip(';')
        protocols = ssl_protocols_line.split()[1:]
        return protocols

    def test_validation(self):
        """
        The config variable `tls_1_0_enabled` must be 'true' or 'false'.
        """
        validate_error(
            new_arguments={'adminrouter_tls_1_0_enabled': 'foo'},
            key='adminrouter_tls_1_0_enabled',
            message=true_false_msg,
        )

    @pytest.mark.parametrize(
        'new_arguments', [{}, {'adminrouter_tls_1_0_enabled': 'false'}]
    )
    def test_default(self, new_arguments):
        """
        By default TLS 1.0 is disabled, and therefore by default the config
        variable is set to 'false'.

        This test is parametrized to demonstrate that having no configuration
        produces the same results as setting the config variable to `'false'`.
        """
        protocols = self.supported_ssl_protocols(
            new_config_arguments=new_arguments,
        )
        assert protocols == ['TLSv1.1', 'TLSv1.2']

    def test_enable(self):
        """
        Setting the config variable to 'true' enables TLS 1.0.
        """
        new_arguments = {'adminrouter_tls_1_0_enabled': 'true'}
        protocols = self.supported_ssl_protocols(
            new_config_arguments=new_arguments,
        )
        assert protocols == ['TLSv1', 'TLSv1.1', 'TLSv1.2']
| 35.471074 | 97 | 0.629077 | from textwrap import dedent
from typing import List
import pytest
import gen
from gen.tests.utils import make_arguments, true_false_msg, validate_error
class TestAdminRouterTLSConfig:
"""
Tests for the Admin Router TLS Config creation.
"""
def test_default(self):
"""
By default, the configuration specifies certain TLS settings.
This test is a sanity check for the configuration template logic
rather than a particularly useful feature test.
"""
config_path = '/etc/adminrouter-tls.conf'
arguments = make_arguments(new_arguments={})
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
[config] = [item for item in package if item['path'] == config_path]
expected_configuration = dedent(
"""\
# Ref: https://github.com/cloudflare/sslconfig/blob/master/conf
# Modulo ChaCha20 cipher.
ssl_ciphers EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
# To manually test which TLS versions are enabled on a node, use
# `openssl` commands.
#
# See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
# details.
ssl_protocols TLSv1.1 TLSv1.2;
"""
)
assert config['content'] == expected_configuration
class TestToggleTLS1:
"""
Tests for toggling TLS 1.0.
To manually test that this is, in fact, a working toggle for TLS 1.0, use
`openssl` commands.
See comments on https://jira.mesosphere.com/browse/DCOS-13437 for more
details.
"""
def supported_ssl_protocols(self, new_config_arguments) -> List[str]:
"""
This finds a line which looks like the following:
ssl protocols TLSv1, TLSv1.1;
in the Admin Router TLS configuration.
It then returns the listed protocols.
Args:
new_config_arguments: Arguments which are added to the 'standard'
set of arguments before generating configuration files.
Returns:
A ``list`` of supported SSL protocols.
"""
arguments = make_arguments(new_arguments=new_config_arguments)
generated = gen.generate(arguments=arguments)
package = generated.templates['dcos-config.yaml']['package']
config_path = '/etc/adminrouter-tls.conf'
[config] = [item for item in package if item['path'] == config_path]
[ssl_protocols_line] = [
line for line in config['content'].split('\n') if
# We strip whitespace from the beginning of the line as NGINX
# configuration lines can start with whitespace.
line.lstrip().startswith('ssl_protocols ')
]
ssl_protocols_line = ssl_protocols_line.strip(';')
protocols = ssl_protocols_line.split()[1:]
return protocols
def test_validation(self):
"""
The config variable `tls_1_0_enabled` must be 'true' or 'false'.
"""
validate_error(
new_arguments={'adminrouter_tls_1_0_enabled': 'foo'},
key='adminrouter_tls_1_0_enabled',
message=true_false_msg,
)
@pytest.mark.parametrize(
'new_arguments', [{}, {'adminrouter_tls_1_0_enabled': 'false'}]
)
def test_default(self, new_arguments):
"""
By default TLS 1.0 is disabled, and therefore by default the config
variable is set to 'false'.
This test is parametrized to demonstrate that having no configuration
produces the same results as setting the config variable to `'false'`.
"""
protocols = self.supported_ssl_protocols(
new_config_arguments=new_arguments,
)
assert protocols == ['TLSv1.1', 'TLSv1.2']
def test_enable(self):
"""
Setting the config variable to 'true' enables TLS 1.0.
"""
new_arguments = {'adminrouter_tls_1_0_enabled': 'true'}
protocols = self.supported_ssl_protocols(
new_config_arguments=new_arguments,
)
assert protocols == ['TLSv1', 'TLSv1.1', 'TLSv1.2']
| 0 | 0 | 0 |
d6b0e492861296d87420523ace08720910f389af | 735 | py | Python | bin/change_table_engine.py | Osso/dotfiles | 26a079e140f9f9ba8117d42aa25a049807965093 | [
"MIT"
] | 3 | 2017-04-21T20:56:10.000Z | 2019-06-10T09:24:14.000Z | bin/change_table_engine.py | Osso/dotfiles | 26a079e140f9f9ba8117d42aa25a049807965093 | [
"MIT"
] | null | null | null | bin/change_table_engine.py | Osso/dotfiles | 26a079e140f9f9ba8117d42aa25a049807965093 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
engine = 'innodb'
host = 'localhost'
db_name = ''
user = ''
passwd = ''
skip_tables = ()
import MySQLdb
db = MySQLdb.connect(user=user, passwd=passwd, db=db_name, host=host)
c = db.cursor()
c.execute("show tables")
row = c.fetchone()
while row:
table = row[0]
print 'Converting Table: %s' % table
e = db.cursor()
e.execute("SHOW TABLE STATUS from `%s` LIKE '%s'" % (db_name, table))
info = e.fetchone()
if table in skip_tables or info[1] == engine:
print 'Skipping'
row = c.fetchone()
continue
e.execute('ALTER TABLE `%s` ENGINE = %s, tablespace ts_1 storage disk' % (MySQLdb.escape_string(table), engine))
row = c.fetchone()
print 'Done'
c.close()
| 23.709677 | 116 | 0.623129 | #!/usr/bin/env python
engine = 'innodb'
host = 'localhost'
db_name = ''
user = ''
passwd = ''
skip_tables = ()
import MySQLdb
db = MySQLdb.connect(user=user, passwd=passwd, db=db_name, host=host)
c = db.cursor()
c.execute("show tables")
row = c.fetchone()
while row:
table = row[0]
print 'Converting Table: %s' % table
e = db.cursor()
e.execute("SHOW TABLE STATUS from `%s` LIKE '%s'" % (db_name, table))
info = e.fetchone()
if table in skip_tables or info[1] == engine:
print 'Skipping'
row = c.fetchone()
continue
e.execute('ALTER TABLE `%s` ENGINE = %s, tablespace ts_1 storage disk' % (MySQLdb.escape_string(table), engine))
row = c.fetchone()
print 'Done'
c.close()
| 0 | 0 | 0 |
470d276d504c478d5495528219b996f256618f93 | 37 | py | Python | bsdict/__init__.py | andrei-dubovik/bsdict | d3c4d3c9cab4710de2f26d6d8bd7be7c3a03789e | [
"BSD-3-Clause"
] | null | null | null | bsdict/__init__.py | andrei-dubovik/bsdict | d3c4d3c9cab4710de2f26d6d8bd7be7c3a03789e | [
"BSD-3-Clause"
] | null | null | null | bsdict/__init__.py | andrei-dubovik/bsdict | d3c4d3c9cab4710de2f26d6d8bd7be7c3a03789e | [
"BSD-3-Clause"
] | null | null | null | from .bsdict import bsdict, memoizer
| 18.5 | 36 | 0.810811 | from .bsdict import bsdict, memoizer
| 0 | 0 | 0 |
b72d18c6f62c214e3983921081d4f4cd19c26629 | 247 | py | Python | codechef/may_long_challenge/bella-ciao.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | codechef/may_long_challenge/bella-ciao.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | codechef/may_long_challenge/bella-ciao.py | abhishek-parashar/Right-From-Scratch | e596344b0db95cfdeba876676885f062ef5f7c23 | [
"Apache-2.0"
] | null | null | null | t = int(input())
while(t>0):
a=list(map(int,input().split(' ')))
D=a[0]
d=a[1]
p=a[2]
q=a[3]
remainder=D%d
n=D//d
value=(n*p*d) + (d*q*(n*(n-1)//2))+(p*remainder+(remainder*q*n))
print(value,"\n")
t=t-1
| 19 | 68 | 0.453441 | t = int(input())
while(t>0):
a=list(map(int,input().split(' ')))
D=a[0]
d=a[1]
p=a[2]
q=a[3]
remainder=D%d
n=D//d
value=(n*p*d) + (d*q*(n*(n-1)//2))+(p*remainder+(remainder*q*n))
print(value,"\n")
t=t-1
| 0 | 0 | 0 |
d5d81698fcf1a5e331071733b775c2a1cf01aa4e | 1,277 | py | Python | cajas/boxes/models/box_daily_square.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/boxes/models/box_daily_square.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | cajas/boxes/models/box_daily_square.py | dmontoya1/cajas | 5eb3d5835250d5dafae398082200b79c1ca8063b | [
"MIT"
] | null | null | null | from django.db import models
from cajas.users.models.user import User
from cajas.office.models.officeCountry import OfficeCountry
class BoxDailySquare(models.Model):
"""Modelo para la caja de un cuadre diario
"""
user = models.ForeignKey(
User,
verbose_name='Usuario',
on_delete=models.SET_NULL,
blank=True, null=True,
related_name='related_daily_box'
)
office = models.ForeignKey(
OfficeCountry,
verbose_name='Oficina',
related_name='related_daily_square_boxes',
blank=True, null=True,
on_delete=models.SET_NULL
)
balance = models.IntegerField(
"Saldo de la caja",
default=0
)
is_active = models.BooleanField(
"Caja Activa?",
default=True
)
last_movement_id = models.IntegerField(
'id último movimiento',
default=0
)
is_closed = models.BooleanField(
"Caja cerrada?",
default=False
)
| 25.54 | 84 | 0.624119 | from django.db import models
from cajas.users.models.user import User
from cajas.office.models.officeCountry import OfficeCountry
class BoxDailySquare(models.Model):
"""Modelo para la caja de un cuadre diario
"""
user = models.ForeignKey(
User,
verbose_name='Usuario',
on_delete=models.SET_NULL,
blank=True, null=True,
related_name='related_daily_box'
)
office = models.ForeignKey(
OfficeCountry,
verbose_name='Oficina',
related_name='related_daily_square_boxes',
blank=True, null=True,
on_delete=models.SET_NULL
)
balance = models.IntegerField(
"Saldo de la caja",
default=0
)
is_active = models.BooleanField(
"Caja Activa?",
default=True
)
last_movement_id = models.IntegerField(
'id último movimiento',
default=0
)
is_closed = models.BooleanField(
"Caja cerrada?",
default=False
)
def __str__(self):
if self.user:
return "Caja de {} de {}".format(self.user.get_full_name(), self.office)
return "Caja de cuadre diario"
class Meta:
verbose_name = 'Caja de Cuadre Diario'
verbose_name_plural = 'Cajas de Cuadre Diario'
| 143 | 92 | 54 |
0c5c224024b11fee0b68ab1b9509d0c08386838c | 600 | py | Python | src/state.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-05-14T06:37:14.000Z | 2022-03-07T02:25:32.000Z | src/state.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | 2 | 2017-10-08T19:41:18.000Z | 2021-04-08T04:40:50.000Z | src/state.py | JovialKnoll/monsters | 15d969d0220fd003c2c28ae690f66633da370682 | [
"MIT"
] | null | null | null | from monster import Monster
| 22.222222 | 58 | 0.583333 | from monster import Monster
class State(object):
__slots__ = (
'protag_mon',
'fight_results',
)
def __init__(self):
# start with a random monster
self.protag_mon = Monster()
self.fight_results = []
def save(self):
return {
'protag_mon': self.protag_mon,
'fight_results': self.fight_results,
}
@classmethod
def load(cls, save_data):
new_obj = cls()
new_obj.protag_mon = save_data['protag_mon']
new_obj.fight_results = save_data['fight_results']
return new_obj
| 380 | 168 | 23 |
717a3ae15beb8d819244f7e8f3b22e2b9d7c3d30 | 107 | py | Python | verpy/pybin3/tb.py | avielazari/vlsistuff | 34304dc64437fc849d74addd09963dca587df537 | [
"MIT"
] | 26 | 2018-03-17T18:14:22.000Z | 2022-03-14T07:23:13.000Z | verpy/pybin3/tb.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 1 | 2019-10-16T10:31:11.000Z | 2019-10-17T04:14:53.000Z | verpy/pybin3/tb.py | psumesh/vlsistuff | 1fe64b093d0581d99c7d826b74c31b8655fa0b31 | [
"MIT"
] | 7 | 2018-07-16T07:51:25.000Z | 2022-02-15T14:22:54.000Z |
import dump_instance
| 11.888889 | 32 | 0.691589 |
import dump_instance
def help_main(Env):
Env.params['-tb'] = True
dump_instance.help_main(Env)
| 60 | 0 | 23 |
42a0c1b65965757cd699b5f4010098cef1cf0aa6 | 1,506 | py | Python | twoject/urls.py | Daniel-Muruthi/poladapi | 6a7c6d7a78f66c3ae2a2fdf6bebfc68c009aee36 | [
"MIT"
] | null | null | null | twoject/urls.py | Daniel-Muruthi/poladapi | 6a7c6d7a78f66c3ae2a2fdf6bebfc68c009aee36 | [
"MIT"
] | null | null | null | twoject/urls.py | Daniel-Muruthi/poladapi | 6a7c6d7a78f66c3ae2a2fdf6bebfc68c009aee36 | [
"MIT"
] | null | null | null | """twoject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from twapp import views as twapp_views
from django.contrib.auth import views as auth_views
from rest_framework import routers
from rest_framework_simplejwt.views import TokenRefreshView
from knox import views as knox_views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('twapp.urls')),
path('auth/login/', twapp_views.LoginView.as_view(), name="login"),
path('auth/login/refresh/', TokenRefreshView.as_view(), name='login_refresh'),
path('auth/register/', twapp_views.RegisterView.as_view(), name='register'),
path('auth/logout/', knox_views.LogoutView.as_view(), name="logout"),
path('auth/logoutall/', knox_views.LogoutAllView.as_view(), name="logoutall"),
]
| 41.833333 | 82 | 0.73838 | """twoject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from twapp import views as twapp_views
from django.contrib.auth import views as auth_views
from rest_framework import routers
from rest_framework_simplejwt.views import TokenRefreshView
from knox import views as knox_views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('twapp.urls')),
path('auth/login/', twapp_views.LoginView.as_view(), name="login"),
path('auth/login/refresh/', TokenRefreshView.as_view(), name='login_refresh'),
path('auth/register/', twapp_views.RegisterView.as_view(), name='register'),
path('auth/logout/', knox_views.LogoutView.as_view(), name="logout"),
path('auth/logoutall/', knox_views.LogoutAllView.as_view(), name="logoutall"),
]
| 0 | 0 | 0 |
4017feb743106024d7f6df30d888f397fc590313 | 1,640 | py | Python | src/stat_logger.py | Waterkin/stocknet-code | 6df878b599963e9fe31603dd55f78fd56e92f7d9 | [
"MIT"
] | null | null | null | src/stat_logger.py | Waterkin/stocknet-code | 6df878b599963e9fe31603dd55f78fd56e92f7d9 | [
"MIT"
] | null | null | null | src/stat_logger.py | Waterkin/stocknet-code | 6df878b599963e9fe31603dd55f78fd56e92f7d9 | [
"MIT"
] | null | null | null | '''
Date: 2022-01-11 16:05:39
LastEditors: Waterking
LastEditTime: 2022-01-12 18:21:49
FilePath: /stocknet-code/src/stat_logger.py
'''
#!/usr/local/bin/python
import metrics as metrics
from ConfigLoader import logger
| 38.139535 | 142 | 0.681707 | '''
Date: 2022-01-11 16:05:39
LastEditors: Waterking
LastEditTime: 2022-01-12 18:21:49
FilePath: /stocknet-code/src/stat_logger.py
'''
#!/usr/local/bin/python
import metrics as metrics
from ConfigLoader import logger
def print_batch_stat(n_iter, train_batch_loss, train_batch_n_acc, train_batch_size):
iter_str = '\titer: {0}'.format(n_iter)
loss_str = 'batch loss: {:.6f}'.format(train_batch_loss) if type(train_batch_loss) is float else 'batch loss: {}'.format(train_batch_loss)
train_batch_acc = metrics.eval_acc(n_acc=train_batch_n_acc, total=train_batch_size)
acc_str = 'batch acc: {:.6f}'.format(train_batch_acc)
logger.info(', '.join((iter_str, loss_str, acc_str)))
def print_epoch_stat(epoch_loss, epoch_acc):
epoch_stat_pattern = 'Epoch: loss: {0:.6f}, acc: {1:.6f}'
logger.info(epoch_stat_pattern.format(epoch_loss, epoch_acc))
def print_eval_res(result_dict, use_mcc=True, use_f1=True):
# modify use_mcc=None -> use_mcc=True, add F1
eval_loss, eval_acc = result_dict['loss'], result_dict['acc']
iter_str = '\tEval'
loss_str = 'loss: {:.6f}'.format(eval_loss) if type(eval_loss) is float else 'eval loss: {}'.format(eval_loss)
acc_str = 'acc: {:.6f}'.format(eval_acc)
info_list = [iter_str, loss_str, acc_str]
if use_mcc:
mcc = result_dict['mcc']
mcc_str = 'mcc: {:.6f}'.format(mcc) if mcc else 'mcc: {}'.format(mcc)
info_list.append(mcc_str)
#
if use_f1:
f1 = result_dict['f1']
f1_str = 'f1: {:.6f}'.format(f1) if f1 else 'f1: {}'.format(mcc)
info_list.append(f1_str)
logger.info(', '.join(info_list))
| 1,351 | 0 | 69 |
6b3f6344429debb4b2157d0a395536e9054d9037 | 59,437 | py | Python | rpm_s3/vendor/createrepo/createrepo/__init__.py | stackstate-lab/rpm-s3 | 6c7929fc6034a93787ab5596876c8d00826486db | [
"BSD-2-Clause"
] | null | null | null | rpm_s3/vendor/createrepo/createrepo/__init__.py | stackstate-lab/rpm-s3 | 6c7929fc6034a93787ab5596876c8d00826486db | [
"BSD-2-Clause"
] | null | null | null | rpm_s3/vendor/createrepo/createrepo/__init__.py | stackstate-lab/rpm-s3 | 6c7929fc6034a93787ab5596876c8d00826486db | [
"BSD-2-Clause"
] | null | null | null | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2009 Red Hat, Inc -
# written by seth vidal skvidal at fedoraproject.org
import os
import sys
import fnmatch
import time
import yumbased
import shutil
from bz2 import BZ2File
from urlgrabber import grabber
import tempfile
import stat
import fcntl
import subprocess
from select import select
from yum import misc, Errors
from yum.repoMDObject import RepoMD, RepoData
from yum.sqlutils import executeSQL
from yum.packageSack import MetaSack
from yum.packages import YumAvailablePackage
import rpmUtils.transaction
from utils import _, errorprint, MDError, lzma, _available_compression
import readMetadata
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
try:
import sqlitecachec
except ImportError:
pass
from utils import _gzipOpen, compressFile, compressOpen, checkAndMakeDir, GzipFile, \
checksum_and_rename, split_list_into_equal_chunks
from utils import num_cpus_online
import deltarpms
__version__ = '0.9.9'
class SplitMetaDataGenerator(MetaDataGenerator):
"""takes a series of dirs and creates repodata for all of them
most commonly used with -u media:// - if no outputdir is specified
it will create the repodata in the first dir in the list of dirs
"""
def doPkgMetadata(self):
"""all the heavy lifting for the package metadata"""
if len(self.conf.directories) == 1:
MetaDataGenerator.doPkgMetadata(self)
return
if self.conf.update:
self._setup_old_metadata_lookup()
filematrix = {}
for mydir in self.conf.directories:
if os.path.isabs(mydir):
thisdir = mydir
else:
if mydir.startswith('../'):
thisdir = os.path.realpath(mydir)
else:
thisdir = os.path.join(self.conf.basedir, mydir)
filematrix[mydir] = self.getFileList(thisdir, '.rpm')
# pkglist is a bit different for split media, as we have to know
# which dir. it belongs to. So we walk the dir. and then filter.
# We could be faster by not walking the dir. ... but meh.
if self.conf.pkglist:
pkglist = set(self.conf.pkglist)
pkgs = []
for fname in filematrix[mydir]:
if fname not in pkglist:
continue
pkgs.append(fname)
filematrix[mydir] = pkgs
self.trimRpms(filematrix[mydir])
self.pkgcount += len(filematrix[mydir])
mediano = 1
self.current_pkg = 0
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
try:
self.openMetadataDocs()
for mydir in self.conf.directories:
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
self.writeMetadataDocs(filematrix[mydir], mydir)
mediano += 1
self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, 1)
self.closeMetadataDocs()
except (IOError, OSError) as e:
raise MDError(_('Cannot access/write repodata files: %s') % e)
| 41.88654 | 536 | 0.549876 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2009 Red Hat, Inc -
# written by seth vidal skvidal at fedoraproject.org
import os
import sys
import fnmatch
import time
import yumbased
import shutil
from bz2 import BZ2File
from urlgrabber import grabber
import tempfile
import stat
import fcntl
import subprocess
from select import select
from yum import misc, Errors
from yum.repoMDObject import RepoMD, RepoData
from yum.sqlutils import executeSQL
from yum.packageSack import MetaSack
from yum.packages import YumAvailablePackage
import rpmUtils.transaction
from utils import _, errorprint, MDError, lzma, _available_compression
import readMetadata
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
try:
import sqlitecachec
except ImportError:
pass
from utils import _gzipOpen, compressFile, compressOpen, checkAndMakeDir, GzipFile, \
checksum_and_rename, split_list_into_equal_chunks
from utils import num_cpus_online
import deltarpms
# Version of the createrepo library / metadata generator.
__version__ = '0.9.9'
class MetaDataConfig(object):
    """Bag of configuration defaults for a createrepo run.

    Each attribute set here corresponds to a command-line option or a
    derived path.  MetaDataGenerator reads these values and rewrites some
    of them during setup (basedir, outputdir, directories, cachedir, ...).
    """
    def __init__(self):
        # Populate every option with its default value.
        self.quiet = False
        self.verbose = False
        self.profile = False
        self.excludes = []          # glob patterns of packages to skip
        self.baseurl = None
        self.groupfile = None
        self.sumtype = 'sha256'     # checksum type name used for metadata
        self.pretty = False
        self.cachedir = None
        self.use_cache = False      # forced True when cachedir is set
        self.basedir = os.getcwd()
        self.checkts = False        # skip work if pkgs older than metadata
        self.split = False
        self.update = False         # reuse existing metadata where possible
        self.deltas = False # do the deltarpm thing
        # where to put the .drpms - defaults to 'drpms' inside 'repodata'
        self.deltadir = None
        self.delta_relative = 'drpms/'
        self.oldpackage_paths = [] # where to look for the old packages -
        self.deltafile = 'prestodelta.xml'
        self.num_deltas = 1 # number of older versions to delta (max)
        self.max_delta_rpm_size = 100000000
        self.update_md_path = None  # where to find old metadata for --update
        self.skip_stat = False
        self.database = True        # generate the sqlite databases
        self.outputdir = None       # derived from basedir when unset
        # regex patterns selecting "interesting" files/dirs for filelists
        self.file_patterns = ['.*bin\/.*', '^\/etc\/.*', '^\/usr\/lib\/sendmail$']
        self.dir_patterns = ['.*bin\/.*', '^\/etc\/.*']
        self.skip_symlinks = False
        self.pkglist = []           # explicit subset of packages to include
        self.database_only = False
        # filenames for the individual metadata pieces
        self.primaryfile = 'primary.xml'
        self.filelistsfile = 'filelists.xml'
        self.otherfile = 'other.xml'
        self.repomdfile = 'repomd.xml'
        self.tempdir = '.repodata'  # staging dir while generating
        self.finaldir = 'repodata'  # where finished metadata lands
        self.olddir = '.olddata'    # previous metadata parked here
        self.mdtimestamp = 0
        self.directory = None       # primary package dir
        self.directories = []       # all package dirs (for --split)
        self.changelog_limit = None # needs to be an int or None
        self.unique_md_filenames = True
        self.additional_metadata = {} # dict of 'type':'filename'
        self.revision = str(int(time.time()))
        self.content_tags = [] # flat list of strings (like web 2.0 tags)
        self.distro_tags = []# [(cpeid(None allowed), human-readable-string)]
        self.repo_tags = []# strings, forwhatever they are worth
        self.read_pkgs_list = None # filepath/name to write out list of pkgs
        # read in this run of createrepo
        self.collapse_glibc_requires = True
        self.worker_cmd = '/usr/share/createrepo/worker.py'
        #self.worker_cmd = './worker.py' # helpful when testing
        self.retain_old_md = 0
        self.compress_type = 'compat' # 'compat' -> resolved to 'gz' later
class SimpleMDCallBack(object):
    """Minimal default callback: errors go to stderr, log messages to
    stdout, and progress is drawn as a single carriage-return line."""

    def errorlog(self, thing):
        """Write an error message (plus newline) to stderr."""
        # Use sys.stderr.write() instead of the Python-2-only
        # 'print >> sys.stderr' chevron syntax; output is identical and
        # this matches the call style used by log() below.
        sys.stderr.write('%s\n' % (thing,))

    def log(self, thing):
        """Write an informational message to stdout."""
        print(thing)

    def progress(self, item, current, total):
        """Redraw a one-line 'current/total - item' progress indicator."""
        sys.stdout.write('\r' + ' ' * 80)  # blank the line first
        sys.stdout.write("\r%d/%d - %s" % (current, total, item))
        sys.stdout.flush()
class MetaDataGenerator:
def __init__(self, config_obj=None, callback=None):
self.conf = config_obj
if config_obj == None:
self.conf = MetaDataConfig()
if not callback:
self.callback = SimpleMDCallBack()
else:
self.callback = callback
self.ts = rpmUtils.transaction.initReadOnlyTransaction()
self.pkgcount = 0
self.current_pkg = 0
self.files = []
self.rpmlib_reqs = {}
self.read_pkgs = []
self.compat_compress = False
if not self.conf.directory and not self.conf.directories:
raise MDError("No directory given on which to run.")
if self.conf.compress_type == 'compat':
self.compat_compress = True
self.conf.compress_type = None
if not self.conf.compress_type:
self.conf.compress_type = 'gz'
if self.conf.compress_type not in utils._available_compression:
raise MDError("Compression %s not available: Please choose from: %s" \
% (self.conf.compress_type, ', '.join(utils._available_compression)))
if not self.conf.directories: # just makes things easier later
self.conf.directories = [self.conf.directory]
if not self.conf.directory: # ensure we have both in the config object
self.conf.directory = self.conf.directories[0]
# the cachedir thing:
if self.conf.cachedir:
self.conf.use_cache = True
# this does the dir setup we need done
self._parse_directory()
self._test_setup_dirs()
def _parse_directory(self):
"""pick up the first directory given to us and make sure we know
where things should go"""
if os.path.isabs(self.conf.directory):
self.conf.basedir = os.path.dirname(self.conf.directory)
self.conf.relative_dir = os.path.basename(self.conf.directory)
else:
self.conf.basedir = os.path.realpath(self.conf.basedir)
self.conf.relative_dir = self.conf.directory
self.package_dir = os.path.join(self.conf.basedir,
self.conf.relative_dir)
if not self.conf.outputdir:
self.conf.outputdir = os.path.join(self.conf.basedir,
self.conf.relative_dir)
def _test_setup_dirs(self):
# start the sanity/stupidity checks
for mydir in self.conf.directories:
if os.path.isabs(mydir):
testdir = mydir
else:
if mydir.startswith('../'):
testdir = os.path.realpath(mydir)
else:
testdir = os.path.join(self.conf.basedir, mydir)
if not os.path.exists(testdir):
raise MDError(_('Directory %s must exist') % mydir)
if not os.path.isdir(testdir):
raise MDError(_('%s must be a directory') % mydir)
if not os.access(self.conf.outputdir, os.W_OK):
raise MDError(_('Directory %s must be writable.') % self.conf.outputdir)
temp_output = os.path.join(self.conf.outputdir, self.conf.tempdir)
if not checkAndMakeDir(temp_output):
raise MDError(_('Cannot create/verify %s') % temp_output)
temp_final = os.path.join(self.conf.outputdir, self.conf.finaldir)
if not checkAndMakeDir(temp_final):
raise MDError(_('Cannot create/verify %s') % temp_final)
if self.conf.database:
# do flock test on temp_final, temp_output
# if it fails raise MDError
for direc in [temp_final, temp_output]:
f = open(direc + '/locktest', 'w')
try:
fcntl.flock(f.fileno(), fcntl.LOCK_EX)
except (OSError, IOError) as e:
raise MDError(
_("Could not create exclusive lock in %s and sqlite database generation enabled. Is this path on nfs? Is your lockd running?") % direc)
else:
os.unlink(direc + '/locktest')
if self.conf.deltas:
temp_delta = os.path.join(self.conf.outputdir,
self.conf.delta_relative)
if not checkAndMakeDir(temp_delta):
raise MDError(_('Cannot create/verify %s') % temp_delta)
self.conf.deltadir = temp_delta
if os.path.exists(os.path.join(self.conf.outputdir, self.conf.olddir)):
raise MDError(_('Old data directory exists, please remove: %s') % self.conf.olddir)
# make sure we can write to where we want to write to:
# and pickup the mdtimestamps while we're at it
direcs = ['tempdir' , 'finaldir']
if self.conf.deltas:
direcs.append('deltadir')
for direc in direcs:
filepath = os.path.join(self.conf.outputdir, getattr(self.conf,
direc))
if os.path.exists(filepath):
if not os.access(filepath, os.W_OK):
raise MDError(_('error in must be able to write to metadata dir:\n -> %s') % filepath)
if self.conf.checkts:
# checking for repodata/repomd.xml - not just the data dir
rxml = filepath + '/repomd.xml'
if os.path.exists(rxml):
timestamp = os.path.getctime(rxml)
if timestamp > self.conf.mdtimestamp:
self.conf.mdtimestamp = timestamp
if self.conf.groupfile:
a = self.conf.groupfile
if self.conf.split:
a = os.path.join(self.package_dir, self.conf.groupfile)
elif not os.path.isabs(a):
a = os.path.join(self.package_dir, self.conf.groupfile)
if not os.path.exists(a):
raise MDError(_('Error: groupfile %s cannot be found.' % a))
self.conf.groupfile = a
if self.conf.cachedir:
a = self.conf.cachedir
if not os.path.isabs(a):
a = os.path.join(self.conf.outputdir, a)
if not checkAndMakeDir(a):
raise MDError(_('Error: cannot open/write to cache dir %s' % a))
self.conf.cachedir = a
def _os_path_walk(self, top, func, arg):
"""Directory tree walk with callback function.
copy of os.path.walk, fixes the link/stating problem
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = os.path.join(top, name)
if os.path.isdir(name):
self._os_path_walk(name, func, arg)
def getFileList(self, directory, ext):
"""Return all files in path matching ext, store them in filelist,
recurse dirs. Returns a list object"""
extlen = len(ext)
def extension_visitor(filelist, dirname, names):
for fn in names:
fn = os.path.join(dirname, fn)
if os.path.isdir(fn):
continue
if self.conf.skip_symlinks and os.path.islink(fn):
continue
elif fn[-extlen:].lower() == '%s' % (ext):
filelist.append(fn[len(startdir):])
filelist = []
startdir = directory + '/'
self._os_path_walk(startdir, extension_visitor, filelist)
return filelist
    def errorlog(self, thing):
        """Report an error message via utils.errorprint.

        subclass this if you want something different....
        """
        errorprint(thing)
def checkTimeStamps(self):
"""check the timestamp of our target dir. If it is not newer than
the repodata return False, else True"""
if self.conf.checkts and self.conf.mdtimestamp:
dn = os.path.join(self.conf.basedir, self.conf.directory)
files = self.getFileList(dn, '.rpm')
files = self.trimRpms(files)
for f in files:
fn = os.path.join(self.conf.basedir, self.conf.directory, f)
if not os.path.exists(fn):
self.callback.errorlog(_('cannot get to file: %s') % fn)
if os.path.getctime(fn) > self.conf.mdtimestamp:
return False
return True
return False
def trimRpms(self, files):
badrpms = []
for rpm_file in files:
for glob in self.conf.excludes:
if fnmatch.fnmatch(rpm_file, glob):
if rpm_file not in badrpms:
badrpms.append(rpm_file)
for rpm_file in badrpms:
if rpm_file in files:
files.remove(rpm_file)
return files
    def _setup_old_metadata_lookup(self):
        """sets up the .oldData object for handling the --update call. Speeds
        up generating updates for new metadata"""
        #FIXME - this only actually works for single dirs. It will only
        # function for the first dir passed to --split, not all of them
        # this needs to be fixed by some magic in readMetadata.py
        # using opts.pkgdirs as a list, I think.
        if self.conf.update:
            #build the paths
            opts = {
                'verbose' : self.conf.verbose,
                'pkgdir' : os.path.normpath(self.package_dir)
            }
            if self.conf.skip_stat:
                # trust the old metadata without stat()ing each package
                opts['do_stat'] = False
            if self.conf.update_md_path:
                norm_u_md_path = os.path.normpath(self.conf.update_md_path)
                u_md_repodata_path = norm_u_md_path + '/repodata'
                if not os.path.exists(u_md_repodata_path):
                    # warn but continue - readMetadata copes with nothing there
                    msg = _('Warning: could not open update_md_path: %s') % u_md_repodata_path
                    self.callback.errorlog(msg)
                old_repo_path = os.path.normpath(norm_u_md_path)
            else:
                old_repo_path = self.conf.outputdir
            #and scan the old repo
            self.oldData = readMetadata.MetadataIndex(old_repo_path, opts)
    def _setup_grabber(self):
        # Lazily create and memoize the URLGrabber used to download remote
        # packages.  Inside this method 'grabber' resolves to the module
        # (global scope), not the class-level property below.
        if not hasattr(self, '_grabber'):
            self._grabber = grabber.URLGrabber()
        return self._grabber
    # expose the lazy grabber as an attribute: self.grabber
    grabber = property(fget = lambda self: self._setup_grabber())
    def doPkgMetadata(self):
        """all the heavy lifting for the package metadata"""
        if self.conf.update:
            self._setup_old_metadata_lookup()
        # rpms we're going to be dealing with
        if self.conf.pkglist:
            packages = self.conf.pkglist
        else:
            packages = self.getFileList(self.package_dir, '.rpm')
        if not isinstance(packages, MetaSack):
            # a plain list of filenames: apply the exclusion globs
            packages = self.trimRpms(packages)
        self.pkgcount = len(packages)
        try:
            self.openMetadataDocs()
            self.writeMetadataDocs(packages)
            self.closeMetadataDocs()
        except (IOError, OSError) as e:
            raise MDError(_('Cannot access/write repodata files: %s') % e)
def openMetadataDocs(self):
if self.conf.database_only:
self.setup_sqlite_dbs()
else:
self.primaryfile = self._setupPrimary()
self.flfile = self._setupFilelists()
self.otherfile = self._setupOther()
if self.conf.deltas:
self.deltafile = self._setupDelta()
def _setupPrimary(self):
# setup the primary metadata file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.primaryfile + '.' + 'gz'
primaryfilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(primaryfilepath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<metadata xmlns="http://linux.duke.edu/metadata/common"' \
' xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="%s">' %
self.pkgcount)
return fo
def _setupFilelists(self):
# setup the filelist file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.filelistsfile + '.' + 'gz'
filelistpath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(filelistpath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<filelists xmlns="http://linux.duke.edu/metadata/filelists"' \
' packages="%s">' % self.pkgcount)
return fo
def _setupOther(self):
# setup the other file
# FIXME - make this be conf.compress_type once y-m-p is fixed
fpz = self.conf.otherfile + '.' + 'gz'
otherfilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(otherfilepath, 'w', 'gz')
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<otherdata xmlns="http://linux.duke.edu/metadata/other"' \
' packages="%s">' %
self.pkgcount)
return fo
def _setupDelta(self):
# setup the other file
fpz = self.conf.deltafile + '.' + self.conf.compress_type
deltafilepath = os.path.join(self.conf.outputdir, self.conf.tempdir,
fpz)
fo = compressOpen(deltafilepath, 'w', self.conf.compress_type)
fo.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fo.write('<prestodelta>\n')
return fo
    def read_in_package(self, rpmfile, pkgpath=None, reldir=None):
        """Open *rpmfile* (a path relative to self.package_dir, or a remote
        URL which is downloaded to a scratch dir first) and return a
        CreateRepoPackage object for it.

        Raises MDError for a blank filename, a failed download, an
        unopenable package, or a package with no checksum/ID.
        """
        baseurl = self.conf.baseurl
        if not pkgpath:
            pkgpath = self.package_dir
        if not rpmfile.strip():
            raise MDError("Blank filename passed in, skipping")
        if rpmfile.find("://") != -1:
            # remote package: fetch it into a (lazily created) temp dir
            if not hasattr(self, 'tempdir'):
                self.tempdir = tempfile.mkdtemp()
            pkgname = os.path.basename(rpmfile)
            baseurl = os.path.dirname(rpmfile)
            reldir = self.tempdir
            dest = os.path.join(self.tempdir, pkgname)
            if not self.conf.quiet:
                self.callback.log('\nDownloading %s' % rpmfile)
            try:
                rpmfile = self.grabber.urlgrab(rpmfile, dest)
            except grabber.URLGrabError as e:
                raise MDError("Unable to retrieve remote package %s: %s" % (
                    rpmfile, e))
        else:
            rpmfile = '%s/%s' % (pkgpath, rpmfile)
        # extra state handed to the package object for xml generation
        external_data = { '_cachedir': self.conf.cachedir,
                          '_baseurl': baseurl,
                          '_reldir': reldir,
                          '_packagenumber': self.current_pkg,
                          '_collapse_libc_requires':self.conf.collapse_glibc_requires,
                          }
        try:
            po = yumbased.CreateRepoPackage(self.ts, rpmfile,
                                            sumtype=self.conf.sumtype,
                                            external_data = external_data)
        except Errors.MiscError as e:
            raise MDError("Unable to open package: %s" % e)
        # remember every rpmlib() capability this repo's packages require
        for r in po.requires_print:
            if r.startswith('rpmlib('):
                self.rpmlib_reqs[r] = 1
        if po.checksum in (None, ""):
            raise MDError("No Package ID found for package %s, not going to" \
                          " add it" % po)
        return po
    def writeMetadataDocs(self, pkglist=[], pkgpath=None):
        """Write per-package metadata for *pkglist* into the open output
        streams.  In --update mode, entries unchanged since the old repo
        are reused; everything else is read fresh, with plain local files
        farmed out to worker subprocesses.  Returns the running package
        counter (self.current_pkg)."""
        if not pkglist:
            pkglist = self.conf.pkglist
        if not pkgpath:
            directory = self.conf.directory
        else:
            directory = pkgpath
        # for worker/forked model
        # iterate the pkglist - see which ones are handled by --update and let them
        # go on their merry way
        newpkgs = []
        keptpkgs = []
        if self.conf.update:
            # if we're in --update mode then only act on the new/changed pkgs
            for pkg in pkglist:
                self.current_pkg += 1
                #see if we can pull the nodes from the old repo
                #print self.oldData.basenodes.keys()
                old_pkg = pkg
                if pkg.find("://") != -1:
                    old_pkg = os.path.basename(pkg)
                old_po = self.oldData.getNodes(old_pkg)
                if old_po: # we have a match in the old metadata
                    if self.conf.verbose:
                        self.callback.log(_("Using data from old metadata for %s")
                                          % pkg)
                    keptpkgs.append((pkg, old_po))
                    #FIXME - if we're in update and we have deltas enabled
                    # check the presto data for this pkg and write its info back out
                    # to our deltafile
                    continue
                else:
                    newpkgs.append(pkg)
        else:
            newpkgs = pkglist
        # setup our reldir
        if not pkgpath:
            reldir = os.path.join(self.conf.basedir, directory)
        else:
            reldir = pkgpath
        # filter out those pkgs which are not files - but are pkgobjects
        pkgfiles = []
        for pkg in newpkgs:
            po = None
            if isinstance(pkg, YumAvailablePackage):
                po = pkg
                self.read_pkgs.append(po.localPkg())
            # if we're dealing with remote pkgs - pitch it over to doing
            # them one at a time, for now.
            elif pkg.find('://') != -1:
                po = self.read_in_package(pkg, pkgpath=pkgpath, reldir=reldir)
                self.read_pkgs.append(pkg)
            if po:
                keptpkgs.append((pkg, po))
                continue
            pkgfiles.append(pkg)
        keptpkgs.sort(reverse=True)
        # keptkgs is a list of (filename, po), pkgfiles is a list if filenames.
        # Need to write them in sorted(filename) order. We loop over pkgfiles,
        # inserting keptpkgs in right spots (using the upto argument).
        def save_keptpkgs(upto):
            # flush cached entries whose filename sorts before *upto*
            # (upto=None means flush everything that is left)
            while keptpkgs and (upto is None or keptpkgs[-1][0] < upto):
                filename, po = keptpkgs.pop()
                # reset baseurl in the old pkg
                po.basepath = self.conf.baseurl
                self.primaryfile.write(po.xml_dump_primary_metadata())
                self.flfile.write(po.xml_dump_filelists_metadata())
                self.otherfile.write(po.xml_dump_other_metadata(
                    clog_limit=self.conf.changelog_limit))
        if pkgfiles:
            # divide that list by the number of workers and fork off that many
            # workers to tmpdirs
            # waitfor the workers to finish and as each one comes in
            # open the files they created and write them out to our metadata
            # add up the total pkg counts and return that value
            self._worker_tmp_path = tempfile.mkdtemp() # setting this in the base object so we can clean it up later
            if self.conf.workers < 1:
                self.conf.workers = min(num_cpus_online(), len(pkgfiles))
            pkgfiles.sort()
            worker_chunks = split_list_into_equal_chunks(pkgfiles, self.conf.workers)
            worker_cmd_dict = {}
            worker_jobs = {}
            base_worker_cmdline = [self.conf.worker_cmd,
                    '--pkgoptions=_reldir=%s' % reldir,
                    '--pkgoptions=_collapse_libc_requires=%s' % self.conf.collapse_glibc_requires,
                    '--pkgoptions=_cachedir=%s' % self.conf.cachedir,
                    '--pkgoptions=_baseurl=%s' % self.conf.baseurl,
                    '--globalopts=clog_limit=%s' % self.conf.changelog_limit,
                    '--globalopts=sumtype=%s' % self.conf.sumtype, ]
            if self.conf.quiet:
                base_worker_cmdline.append('--quiet')
            if self.conf.verbose:
                base_worker_cmdline.append('--verbose')
            # hand each worker its slice of the package list via a file
            for worker_num in range(self.conf.workers):
                pkl = self._worker_tmp_path + '/pkglist-%s' % worker_num
                f = open(pkl, 'w')
                f.write('\n'.join(worker_chunks[worker_num]))
                f.close()
                workercmdline = []
                workercmdline.extend(base_worker_cmdline)
                workercmdline.append('--pkglist=%s/pkglist-%s' % (self._worker_tmp_path, worker_num))
                worker_cmd_dict[worker_num] = workercmdline
            for (num, cmdline) in worker_cmd_dict.items():
                if not self.conf.quiet:
                    self.callback.log("Spawning worker %s with %s pkgs" % (num,
                                      len(worker_chunks[num])))
                job = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
                worker_jobs[num] = job
            files = self.primaryfile, self.flfile, self.otherfile
            def log_messages(num):
                # pump one worker's stdout/stderr; a '*** <sizes>' line on
                # stdout announces the three metadata payloads, which are
                # copied verbatim into our primary/filelists/other streams
                job = worker_jobs[num]
                while True:
                    # check stdout and stderr
                    for stream in select((job.stdout, job.stderr), (), ())[0]:
                        line = stream.readline()
                        if line: break
                    else:
                        return # EOF, EOF
                    if stream is job.stdout:
                        if line.startswith('*** '):
                            # get data, save to local files
                            for out, size in zip(files, line[4:].split()):
                                out.write(stream.read(int(size)))
                            return
                        self.callback.log('Worker %s: %s' % (num, line.rstrip()))
                    else:
                        self.callback.errorlog('Worker %s: %s' % (num, line.rstrip()))
            for i, pkg in enumerate(pkgfiles):
                # insert cached packages
                save_keptpkgs(pkg)
                # save output to local files
                log_messages(i % self.conf.workers)
            for (num, job) in worker_jobs.items():
                # process remaining messages on stderr
                log_messages(num)
                if job.wait() != 0:
                    msg = "Worker exited with non-zero value: %s. Fatal." % job.returncode
                    self.callback.errorlog(msg)
                    raise MDError(msg)
            if not self.conf.quiet:
                self.callback.log("Workers Finished")
            for pkgfile in pkgfiles:
                if self.conf.deltas:
                    try:
                        po = self.read_in_package(pkgfile, pkgpath=pkgpath, reldir=reldir)
                        self._do_delta_rpm_package(po)
                    except MDError as e:
                        errorprint(e)
                        continue
                self.read_pkgs.append(pkgfile)
        save_keptpkgs(None) # append anything left
        return self.current_pkg
    def closeMetadataDocs(self):
        """Write the closing XML tags (or close the sqlite cursors in
        database_only mode) and close every stream opened by
        openMetadataDocs()."""
        # save them up to the tmp locations:
        if not self.conf.quiet:
            self.callback.log(_('Saving Primary metadata'))
        if self.conf.database_only:
            self.md_sqlite.pri_cx.close()
        else:
            self.primaryfile.write('\n</metadata>')
            self.primaryfile.close()
        if not self.conf.quiet:
            self.callback.log(_('Saving file lists metadata'))
        if self.conf.database_only:
            self.md_sqlite.file_cx.close()
        else:
            self.flfile.write('\n</filelists>')
            self.flfile.close()
        if not self.conf.quiet:
            self.callback.log(_('Saving other metadata'))
        if self.conf.database_only:
            self.md_sqlite.other_cx.close()
        else:
            self.otherfile.write('\n</otherdata>')
            self.otherfile.close()
        if self.conf.deltas:
            # the delta xml is generated in one go from the drpm dir
            deltam_st = time.time()
            if not self.conf.quiet:
                self.callback.log(_('Saving delta metadata'))
            self.deltafile.write(self.generate_delta_xml())
            self.deltafile.write('\n</prestodelta>')
            self.deltafile.close()
            if self.conf.profile:
                self.callback.log('deltam time: %0.3f' % (time.time() - deltam_st))
    def _do_delta_rpm_package(self, pkg):
        """makes the drpms, if possible, for this package object.
        returns the presto/delta xml metadata as a string
        """
        drpm_pkg_time = time.time()
        # duck and cover if the pkg.size is > whatever
        if int(pkg.size) > self.conf.max_delta_rpm_size:
            if not self.conf.quiet:
                self.callback.log("Skipping %s package " \
                                  "that is > max_delta_rpm_size" % pkg)
            return
        # generate a list of all the potential 'old rpms'
        opd = self._get_old_package_dict()
        # for each of our old_package_paths -
        # make a drpm from the newest of that pkg
        # get list of potential candidates which are likely to match
        for d in self.conf.oldpackage_paths:
            pot_cand = []
            if d not in opd:
                continue
            # cheap prefilter by filename prefix before opening the rpms
            for fn in opd[d]:
                if os.path.basename(fn).startswith(pkg.name):
                    pot_cand.append(fn)
            candidates = []
            for fn in pot_cand:
                try:
                    thispo = yumbased.CreateRepoPackage(self.ts, fn,
                                                sumtype=self.conf.sumtype)
                except Errors.MiscError as e:
                    # unreadable old rpm - just ignore it as a candidate
                    continue
                if (thispo.name, thispo.arch) != (pkg.name, pkg.arch):
                    # not the same, doesn't matter
                    continue
                if thispo == pkg: #exactly the same, doesn't matter
                    continue
                if thispo.EVR >= pkg.EVR: # greater or equal, doesn't matter
                    continue
                candidates.append(thispo)
            # newest candidates first; only the first num_deltas are used
            candidates.sort()
            candidates.reverse()
            for delta_p in candidates[0:self.conf.num_deltas]:
                #make drpm of pkg and delta_p
                dt_st = time.time()
                drpmfn = deltarpms.create_drpm(delta_p, pkg, self.conf.deltadir)
                if not self.conf.quiet or self.conf.profile:
                    self.callback.log('created drpm from %s to %s: %s in %0.3f' % (
                        delta_p, pkg, drpmfn, (time.time() - dt_st)))
        if self.conf.profile:
            self.callback.log('total drpm time for %s: %0.3f' % (pkg,
                              (time.time() - drpm_pkg_time)))
def _get_old_package_dict(self):
if hasattr(self, '_old_package_dict'):
return self._old_package_dict
self._old_package_dict = {}
for d in self.conf.oldpackage_paths:
for f in self.getFileList(d, '.rpm'):
fp = d + '/' + f
fpstat = os.stat(fp)
if int(fpstat[stat.ST_SIZE]) > self.conf.max_delta_rpm_size:
self.callback.log("Skipping %s package " \
"that is > max_delta_rpm_size" % f)
continue
if not self._old_package_dict.has_key(d):
self._old_package_dict[d] = []
self._old_package_dict[d].append(d + '/' + f)
return self._old_package_dict
def generate_delta_xml(self):
"""take the delta rpm output dir, process all the drpm files
produce the text output for the presto/delta xml metadata"""
# go through the drpm dir
# for each file -store the drpm info in a dict based on its target. Just
# appending the output. for each of the keys in the dict, return
# the tag for the target + each of the drpm infos + closure for the target
# tag
targets = {}
results = []
for drpm_fn in self.getFileList(self.conf.deltadir, '.drpm'):
drpm_rel_fn = os.path.normpath(self.conf.delta_relative +
'/' + drpm_fn) # this is annoying
drpm_po = yumbased.CreateRepoPackage(self.ts,
self.conf.deltadir + '/' + drpm_fn, sumtype=self.conf.sumtype)
drpm = deltarpms.DeltaRPMPackage(drpm_po, self.conf.outputdir,
drpm_rel_fn)
if not targets.has_key(drpm_po.pkgtup):
targets[drpm_po.pkgtup] = []
targets[drpm_po.pkgtup].append(drpm.xml_dump_metadata())
for (n, a, e, v, r) in targets.keys():
results.append(""" <newpackage name="%s" epoch="%s" version="%s" release="%s" arch="%s">\n""" % (
n, e, v, r, a))
results.extend(targets[(n,a,e,v,r)])
# for src in targets[(n, a, e, v, r)]:
# results.append(src)
results.append(" </newpackage>\n")
return ' '.join(results)
    def _createRepoDataObject(self, mdfile, mdtype, compress=True,
                              compress_type=None, attribs={}):
        """return random metadata as RepoData object to be added to RepoMD
        mdfile = complete path to file
        mdtype = the metadata type to use
        compress = compress the file before including it
        """
        # copy the file over here
        sfile = os.path.basename(mdfile)
        fo = open(mdfile, 'r')
        outdir = os.path.join(self.conf.outputdir, self.conf.tempdir)
        if not compress_type:
            compress_type = self.conf.compress_type
        if compress:
            sfile = '%s.%s' % (sfile, compress_type)
            outfn = os.path.join(outdir, sfile)
            # NOTE(review): fo.read() returns text but the target is opened
            # mode='wb' - presumably compressOpen() accepts str; confirm on py3
            output = compressOpen(outfn, mode='wb', compress_type=compress_type)
        else:
            outfn = os.path.join(outdir, sfile)
            output = open(outfn, 'w')
        output.write(fo.read())
        output.close()
        fo.seek(0)
        # checksum of the *uncompressed* payload
        open_csum = misc.checksum(self.conf.sumtype, fo)
        fo.close()
        if self.conf.unique_md_filenames:
            # rename to <csum>-<name> and use the checksummed filename
            (csum, outfn) = checksum_and_rename(outfn, self.conf.sumtype)
            sfile = os.path.basename(outfn)
        else:
            if compress:
                csum = misc.checksum(self.conf.sumtype, outfn)
            else:
                csum = open_csum
        thisdata = RepoData()
        thisdata.type = mdtype
        thisdata.location = (self.conf.baseurl, os.path.join(self.conf.finaldir, sfile))
        thisdata.checksum = (self.conf.sumtype, csum)
        if compress:
            thisdata.openchecksum = (self.conf.sumtype, open_csum)
        thisdata.size = str(os.stat(outfn).st_size)
        thisdata.timestamp = str(int(os.stat(outfn).st_mtime))
        # arbitrary extra attributes requested by the caller
        for (k, v) in attribs.items():
            setattr(thisdata, k, str(v))
        return thisdata
def doRepoMetadata(self):
"""wrapper to generate the repomd.xml file that stores the info
on the other files"""
repomd = RepoMD('repoid')
repomd.revision = self.conf.revision
repopath = os.path.join(self.conf.outputdir, self.conf.tempdir)
repofilepath = os.path.join(repopath, self.conf.repomdfile)
if self.conf.content_tags:
repomd.tags['content'] = self.conf.content_tags
if self.conf.distro_tags:
repomd.tags['distro'] = self.conf.distro_tags
# NOTE - test out the cpeid silliness here
if self.conf.repo_tags:
repomd.tags['repo'] = self.conf.repo_tags
sumtype = self.conf.sumtype
workfiles = [(self.conf.otherfile, 'other',),
(self.conf.filelistsfile, 'filelists'),
(self.conf.primaryfile, 'primary')]
if self.conf.deltas:
workfiles.append((self.conf.deltafile, 'prestodelta'))
if self.conf.database:
if not self.conf.quiet: self.callback.log('Generating sqlite DBs')
try:
dbversion = str(sqlitecachec.DBVERSION)
except AttributeError:
dbversion = '9'
#FIXME - in theory some sort of try/except here
rp = sqlitecachec.RepodataParserSqlite(repopath, repomd.repoid, None)
for (rpm_file, ftype) in workfiles:
# when we fix y-m-p and non-gzipped xml files - then we can make this just add
# self.conf.compress_type
if ftype in ('other', 'filelists', 'primary'):
rpm_file = rpm_file + '.' + 'gz'
elif rpm_file.find('.') != -1 and rpm_file.split('.')[-1] not in _available_compression:
rpm_file = rpm_file + '.' + self.conf.compress_type
complete_path = os.path.join(repopath, rpm_file)
zfo = compressOpen(complete_path)
# This is misc.checksum() done locally so we can get the size too.
data = misc.Checksums([sumtype])
while data.read(zfo, 2**16):
pass
uncsum = data.hexdigest(sumtype)
unsize = len(data)
zfo.close()
csum = misc.checksum(sumtype, complete_path)
timestamp = os.stat(complete_path)[8]
db_csums = {}
db_compressed_sums = {}
if self.conf.database:
if ftype in ['primary', 'filelists', 'other']:
if self.conf.verbose:
self.callback.log("Starting %s db creation: %s" % (ftype,
time.ctime()))
if ftype == 'primary':
#FIXME - in theory some sort of try/except here
# TypeError appears to be raised, sometimes :(
rp.getPrimary(complete_path, csum)
elif ftype == 'filelists':
#FIXME and here
rp.getFilelists(complete_path, csum)
elif ftype == 'other':
#FIXME and here
rp.getOtherdata(complete_path, csum)
if ftype in ['primary', 'filelists', 'other']:
tmp_result_name = '%s.xml.gz.sqlite' % ftype
tmp_result_path = os.path.join(repopath, tmp_result_name)
good_name = '%s.sqlite' % ftype
resultpath = os.path.join(repopath, good_name)
# compat compression for rhel5 compatibility from fedora :(
compress_type = self.conf.compress_type
if self.compat_compress:
compress_type = 'bz2'
# rename from silly name to not silly name
os.rename(tmp_result_path, resultpath)
compressed_name = '%s.%s' % (good_name, compress_type)
result_compressed = os.path.join(repopath, compressed_name)
db_csums[ftype] = misc.checksum(sumtype, resultpath)
# compress the files
compressFile(resultpath, result_compressed, compress_type)
# csum the compressed file
db_compressed_sums[ftype] = misc.checksum(sumtype,
result_compressed)
# timestamp+size the uncompressed file
un_stat = os.stat(resultpath)
# remove the uncompressed file
os.unlink(resultpath)
if self.conf.unique_md_filenames:
csum_compressed_name = '%s-%s.%s' % (
db_compressed_sums[ftype], good_name, compress_type)
csum_result_compressed = os.path.join(repopath,
csum_compressed_name)
os.rename(result_compressed, csum_result_compressed)
result_compressed = csum_result_compressed
compressed_name = csum_compressed_name
# timestamp+size the compressed file
db_stat = os.stat(result_compressed)
# add this data as a section to the repomdxml
db_data_type = '%s_db' % ftype
data = RepoData()
data.type = db_data_type
data.location = (self.conf.baseurl,
os.path.join(self.conf.finaldir, compressed_name))
data.checksum = (sumtype, db_compressed_sums[ftype])
data.timestamp = str(int(db_stat.st_mtime))
data.size = str(db_stat.st_size)
data.opensize = str(un_stat.st_size)
data.openchecksum = (sumtype, db_csums[ftype])
data.dbversion = dbversion
if self.conf.verbose:
self.callback.log("Ending %s db creation: %s" % (ftype,
time.ctime()))
repomd.repoData[data.type] = data
data = RepoData()
data.type = ftype
data.checksum = (sumtype, csum)
data.timestamp = str(timestamp)
data.size = str(os.stat(os.path.join(repopath, rpm_file)).st_size)
data.opensize = str(unsize)
data.openchecksum = (sumtype, uncsum)
if self.conf.unique_md_filenames:
if ftype in ('primary', 'filelists', 'other'):
compress = 'gz'
else:
compress = self.conf.compress_type
main_name = '.'.join(rpm_file.split('.')[:-1])
res_file = '%s-%s.%s' % (csum, main_name, compress)
orig_file = os.path.join(repopath, rpm_file)
dest_file = os.path.join(repopath, res_file)
os.rename(orig_file, dest_file)
else:
res_file = rpm_file
rpm_file = res_file
href = os.path.join(self.conf.finaldir, rpm_file)
data.location = (self.conf.baseurl, href)
repomd.repoData[data.type] = data
if not self.conf.quiet and self.conf.database:
self.callback.log('Sqlite DBs complete')
if self.conf.groupfile is not None:
mdcontent = self._createRepoDataObject(self.conf.groupfile, 'group_gz')
repomd.repoData[mdcontent.type] = mdcontent
mdcontent = self._createRepoDataObject(self.conf.groupfile, 'group',
compress=False)
repomd.repoData[mdcontent.type] = mdcontent
if self.conf.additional_metadata:
for md_type, md_file in self.conf.additional_metadata.items():
mdcontent = self._createRepoDataObject(md_file, md_type)
repomd.repoData[mdcontent.type] = mdcontent
# FIXME - disabled until we decide how best to use this
#if self.rpmlib_reqs:
# rpmlib = reporoot.newChild(rpmns, 'lib', None)
# for r in self.rpmlib_reqs.keys():
# req = rpmlib.newChild(rpmns, 'requires', r)
# save it down
try:
fo = open(repofilepath, 'w')
fo.write(repomd.dump_xml())
fo.close()
except (IOError, OSError, TypeError) as e:
self.callback.errorlog(
_('Error saving temp file for repomd.xml: %s') % repofilepath)
self.callback.errorlog('Error was: %s') % str(e)
fo.close()
raise MDError('Could not save temp file: %s' % repofilepath)
def doFinalMove(self):
"""move the just-created repodata from .repodata to repodata
also make sure to preserve any files we didn't mess with in the
metadata dir"""
output_final_dir = os.path.join(self.conf.outputdir, self.conf.finaldir)
output_old_dir = os.path.join(self.conf.outputdir, self.conf.olddir)
if os.path.exists(output_final_dir):
try:
os.rename(output_final_dir, output_old_dir)
except:
raise MDError(_('Error moving final %s to old dir %s' % (
output_final_dir, output_old_dir)))
output_temp_dir = os.path.join(self.conf.outputdir, self.conf.tempdir)
try:
os.rename(output_temp_dir, output_final_dir)
except:
# put the old stuff back
os.rename(output_old_dir, output_final_dir)
raise MDError(_('Error moving final metadata into place'))
for f in ['primaryfile', 'filelistsfile', 'otherfile', 'repomdfile',
'groupfile']:
if getattr(self.conf, f):
fn = os.path.basename(getattr(self.conf, f))
else:
continue
oldfile = os.path.join(output_old_dir, fn)
if os.path.exists(oldfile):
try:
os.remove(oldfile)
except OSError as e:
raise MDError(_(
'Could not remove old metadata file: %s: %s') % (oldfile, e))
old_to_remove = []
old_pr = []
old_fl = []
old_ot = []
old_pr_db = []
old_fl_db = []
old_ot_db = []
for f in os.listdir(output_old_dir):
oldfile = os.path.join(output_old_dir, f)
finalfile = os.path.join(output_final_dir, f)
for (end,lst) in (('-primary.sqlite', old_pr_db), ('-primary.xml', old_pr),
('-filelists.sqlite', old_fl_db), ('-filelists.xml', old_fl),
('-other.sqlite', old_ot_db), ('-other.xml', old_ot)):
fn = '.'.join(f.split('.')[:-1])
if fn.endswith(end):
lst.append(oldfile)
break
# make a list of the old metadata files we don't want to remove.
for lst in (old_pr, old_fl, old_ot, old_pr_db, old_fl_db, old_ot_db):
sortlst = sorted(lst, key=lambda x: os.path.getmtime(x),
reverse=True)
for thisf in sortlst[self.conf.retain_old_md:]:
old_to_remove.append(thisf)
for f in os.listdir(output_old_dir):
oldfile = os.path.join(output_old_dir, f)
finalfile = os.path.join(output_final_dir, f)
fn = '.'.join(f.split('.')[:-1])
if fn in ('filelists.sqlite', 'other.sqlite',
'primary.sqlite') or oldfile in old_to_remove:
try:
os.remove(oldfile)
except (OSError, IOError) as e:
raise MDError(_(
'Could not remove old metadata file: %s: %s') % (oldfile, e))
continue
if os.path.exists(finalfile):
# Hmph? Just leave it alone, then.
try:
if os.path.isdir(oldfile):
shutil.rmtree(oldfile)
else:
os.remove(oldfile)
except OSError as e:
raise MDError(_(
'Could not remove old metadata file: %s: %s') % (oldfile, e))
else:
try:
os.rename(oldfile, finalfile)
except OSError as e:
msg = _('Could not restore old non-metadata file: %s -> %s') % (oldfile, finalfile)
msg += _('Error was %s') % e
raise MDError(msg)
self._cleanup_tmp_repodata_dir()
self._cleanup_update_tmp_dir()
self._write_out_read_pkgs_list()
def _cleanup_update_tmp_dir(self):
if not self.conf.update:
return
shutil.rmtree(self.oldData._repo.basecachedir, ignore_errors=True)
shutil.rmtree(self.oldData._repo.base_persistdir, ignore_errors=True)
def _write_out_read_pkgs_list(self):
# write out the read_pkgs_list file with self.read_pkgs
if self.conf.read_pkgs_list:
try:
fo = open(self.conf.read_pkgs_list, 'w')
fo.write('\n'.join(self.read_pkgs))
fo.flush()
fo.close()
except (OSError, IOError) as e:
self.errorlog(_('Could not write out readpkgs list: %s')
% self.conf.read_pkgs_list)
self.errorlog(_('Error was %s') % e)
def _cleanup_tmp_repodata_dir(self):
output_old_dir = os.path.join(self.conf.outputdir, self.conf.olddir)
output_temp_dir = os.path.join(self.conf.outputdir, self.conf.tempdir)
for dirbase in (self.conf.olddir, self.conf.tempdir):
dirpath = os.path.join(self.conf.outputdir, dirbase)
if os.path.exists(dirpath):
try:
os.rmdir(dirpath)
except OSError as e:
self.errorlog(_('Could not remove temp metadata dir: %s')
% dirbase)
self.errorlog(_('Error was %s') % e)
self.errorlog(_('Please clean up this directory manually.'))
# our worker tmp path
if hasattr(self, '_worker_tmp_path') and os.path.exists(self._worker_tmp_path):
shutil.rmtree(self._worker_tmp_path, ignore_errors=True)
    def setup_sqlite_dbs(self, initdb=True):
        """sets up the sqlite dbs w/table schemas and db_infos"""
        # NOTE(review): 'initdb' is currently unused - kept for API compat
        destdir = os.path.join(self.conf.outputdir, self.conf.tempdir)
        try:
            self.md_sqlite = MetaDataSqlite(destdir)
        except sqlite.OperationalError as e:
            # most often a stale .repodata dir holding locked db files
            raise MDError(_('Cannot create sqlite databases: %s.\n' \
                'Maybe you need to clean up a .repodata dir?') % e)
class SplitMetaDataGenerator(MetaDataGenerator):
    """takes a series of dirs and creates repodata for all of them
    most commonly used with -u media:// - if no outputdir is specified
    it will create the repodata in the first dir in the list of dirs
    """
    def __init__(self, config_obj=None, callback=None):
        # bug fix: the caller's callback used to be dropped (callback=None
        # was hard-coded in the superclass call)
        MetaDataGenerator.__init__(self, config_obj=config_obj,
                                   callback=callback)

    def _getFragmentUrl(self, url, fragment):
        """Return *url* with its fragment replaced by str(*fragment*)
        (the media number); empty/None urls are returned unchanged."""
        # bug fix: we need the urlparse *module* (for uses_fragment,
        # urlsplit, urlunsplit) - the old code did
        # 'from urlparse import urlparse', importing the function, and then
        # failed with AttributeError on every module-attribute access
        try:
            import urlparse as _urlparse_mod          # Python 2
        except ImportError:
            import urllib.parse as _urlparse_mod      # Python 3
        _urlparse_mod.uses_fragment.append('media')
        if not url:
            return url
        (scheme, netloc, path, query, fragid) = _urlparse_mod.urlsplit(url)
        return _urlparse_mod.urlunsplit((scheme, netloc, path, query,
                                         str(fragment)))

    def doPkgMetadata(self):
        """all the heavy lifting for the package metadata"""
        if len(self.conf.directories) == 1:
            # single dir: behave exactly like the non-split generator
            MetaDataGenerator.doPkgMetadata(self)
            return
        if self.conf.update:
            self._setup_old_metadata_lookup()
        filematrix = {}
        for mydir in self.conf.directories:
            if os.path.isabs(mydir):
                thisdir = mydir
            else:
                if mydir.startswith('../'):
                    thisdir = os.path.realpath(mydir)
                else:
                    thisdir = os.path.join(self.conf.basedir, mydir)
            filematrix[mydir] = self.getFileList(thisdir, '.rpm')
            # pkglist is a bit different for split media, as we have to know
            # which dir. it belongs to. So we walk the dir. and then filter.
            # We could be faster by not walking the dir. ... but meh.
            if self.conf.pkglist:
                pkglist = set(self.conf.pkglist)
                pkgs = []
                for fname in filematrix[mydir]:
                    if fname not in pkglist:
                        continue
                    pkgs.append(fname)
                filematrix[mydir] = pkgs
            self.trimRpms(filematrix[mydir])
            self.pkgcount += len(filematrix[mydir])
        mediano = 1
        self.current_pkg = 0
        self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
        try:
            self.openMetadataDocs()
            for mydir in self.conf.directories:
                self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, mediano)
                self.writeMetadataDocs(filematrix[mydir], mydir)
                mediano += 1
            self.conf.baseurl = self._getFragmentUrl(self.conf.baseurl, 1)
            self.closeMetadataDocs()
        except (IOError, OSError) as e:
            raise MDError(_('Cannot access/write repodata files: %s') % e)
class MetaDataSqlite(object):
    """Owns the three createrepo sqlite databases (primary, filelists,
    other) under *destdir*: opens the connections, keeps cursors, and
    creates each database's table schema on construction."""
    def __init__(self, destdir):
        # one connection + cursor per database
        self.pri_sqlite_file = os.path.join(destdir, 'primary.sqlite')
        self.pri_cx = sqlite.Connection(self.pri_sqlite_file)
        self.file_sqlite_file = os.path.join(destdir, 'filelists.sqlite')
        self.file_cx = sqlite.Connection(self.file_sqlite_file)
        self.other_sqlite_file = os.path.join(destdir, 'other.sqlite')
        self.other_cx = sqlite.Connection(self.other_sqlite_file)
        self.primary_cursor = self.pri_cx.cursor()
        self.filelists_cursor = self.file_cx.cursor()
        self.other_cursor = self.other_cx.cursor()
        self.create_primary_db()
        self.create_filelists_db()
        self.create_other_db()

    def create_primary_db(self):
        """Create the primary db schema (packages/provides/requires/...)."""
        # make the tables
        schema = [
            """PRAGMA synchronous="OFF";""",
            """pragma locking_mode="EXCLUSIVE";""",
            """CREATE TABLE conflicts (  name TEXT,  flags TEXT,  epoch TEXT,  version TEXT,  release TEXT,  pkgKey INTEGER );""",
            """CREATE TABLE db_info (dbversion INTEGER,  checksum TEXT);""",
            """CREATE TABLE files (  name TEXT,  type TEXT,  pkgKey INTEGER);""",
            """CREATE TABLE obsoletes (  name TEXT,  flags TEXT,  epoch TEXT,  version TEXT,  release TEXT,  pkgKey INTEGER );""",
            """CREATE TABLE packages (  pkgKey INTEGER PRIMARY KEY,  pkgId TEXT,  name TEXT,  arch TEXT,  version TEXT,  epoch TEXT,  release TEXT,  summary TEXT,  description TEXT,  url TEXT,  time_file INTEGER,  time_build INTEGER,  rpm_license TEXT,  rpm_vendor TEXT,  rpm_group TEXT,  rpm_buildhost TEXT,  rpm_sourcerpm TEXT,  rpm_header_start INTEGER,  rpm_header_end INTEGER,  rpm_packager TEXT,  size_package INTEGER,  size_installed INTEGER,  size_archive INTEGER,  location_href TEXT,  location_base TEXT,  checksum_type TEXT);""",
            """CREATE TABLE provides (  name TEXT,  flags TEXT,  epoch TEXT,  version TEXT,  release TEXT,  pkgKey INTEGER );""",
            """CREATE TABLE requires (  name TEXT,  flags TEXT,  epoch TEXT,  version TEXT,  release TEXT,  pkgKey INTEGER , pre BOOL DEFAULT FALSE);""",
            """CREATE INDEX filenames ON files (name);""",
            """CREATE INDEX packageId ON packages (pkgId);""",
            """CREATE INDEX packagename ON packages (name);""",
            """CREATE INDEX pkgconflicts on conflicts (pkgKey);""",
            """CREATE INDEX pkgobsoletes on obsoletes (pkgKey);""",
            """CREATE INDEX pkgprovides on provides (pkgKey);""",
            """CREATE INDEX pkgrequires on requires (pkgKey);""",
            """CREATE INDEX providesname ON provides (name);""",
            """CREATE INDEX requiresname ON requires (name);""",
            """CREATE TRIGGER removals AFTER DELETE ON packages
                 BEGIN
                 DELETE FROM files WHERE pkgKey = old.pkgKey;
                 DELETE FROM requires WHERE pkgKey = old.pkgKey;
                 DELETE FROM provides WHERE pkgKey = old.pkgKey;
                 DELETE FROM conflicts WHERE pkgKey = old.pkgKey;
                 DELETE FROM obsoletes WHERE pkgKey = old.pkgKey;
                 END;""",
            """INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
            ]
        for cmd in schema:
            executeSQL(self.primary_cursor, cmd)

    def create_filelists_db(self):
        """Create the filelists db schema (per-package file listings)."""
        schema = [
            """PRAGMA synchronous="OFF";""",
            """pragma locking_mode="EXCLUSIVE";""",
            """CREATE TABLE db_info (dbversion INTEGER, checksum TEXT);""",
            """CREATE TABLE filelist (  pkgKey INTEGER,  dirname TEXT,  filenames TEXT,  filetypes TEXT);""",
            """CREATE TABLE packages (  pkgKey INTEGER PRIMARY KEY,  pkgId TEXT);""",
            """CREATE INDEX dirnames ON filelist (dirname);""",
            """CREATE INDEX keyfile ON filelist (pkgKey);""",
            """CREATE INDEX pkgId ON packages (pkgId);""",
            """CREATE TRIGGER remove_filelist AFTER DELETE ON packages
                 BEGIN
                 DELETE FROM filelist WHERE pkgKey = old.pkgKey;
                 END;""",
            """INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
            ]
        for cmd in schema:
            executeSQL(self.filelists_cursor, cmd)

    def create_other_db(self):
        """Create the 'other' db schema (per-package changelogs)."""
        schema = [
            """PRAGMA synchronous="OFF";""",
            """pragma locking_mode="EXCLUSIVE";""",
            """CREATE TABLE changelog (  pkgKey INTEGER,  author TEXT,  date INTEGER,  changelog TEXT);""",
            """CREATE TABLE db_info (dbversion INTEGER, checksum TEXT);""",
            """CREATE TABLE packages (  pkgKey INTEGER PRIMARY KEY,  pkgId TEXT);""",
            """CREATE INDEX keychange ON changelog (pkgKey);""",
            """CREATE INDEX pkgId ON packages (pkgId);""",
            """CREATE TRIGGER remove_changelogs AFTER DELETE ON packages
                 BEGIN
                 DELETE FROM changelog WHERE pkgKey = old.pkgKey;
                 END;""",
            """INSERT into db_info values (%s, 'direct_create');""" % sqlitecachec.DBVERSION,
            ]
        for cmd in schema:
            executeSQL(self.other_cursor, cmd)
| 28,559 | 26,581 | 366 |
0eeccddff39a938a9bf70a0b4760664618aed5eb | 11,176 | py | Python | BERT/bert_utils.py | HenryPaik1/paper_implement | fe1204209ab0830d8c58618218a8f2c0a1325721 | [
"MIT"
] | null | null | null | BERT/bert_utils.py | HenryPaik1/paper_implement | fe1204209ab0830d8c58618218a8f2c0a1325721 | [
"MIT"
] | null | null | null | BERT/bert_utils.py | HenryPaik1/paper_implement | fe1204209ab0830d8c58618218a8f2c0a1325721 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import json
def create_pretrain_mask(tokens, mask_cnt, vocab_list):
"""
masking subwords(15% of entire subwords)
- mask_cnt: len(subwords) * 0.15
- [MASK]: 80% of masking candidate token
- original token: 10% of masking candidate token
- another token: 10% of masking candidate token
"""
candidate_idx = []
## subwords in the same list augment a sementic word
## eg. [[0], [1], [2], [4, 5]] -> token_idx 4 + 5 is semantic word
# A list represent a sementic word
for i, token in enumerate(tokens):
if token == '[CLS]' or token == '[SEP]':
continue
if 0 < len(candidate_idx) and token.find(u'\u2581') < 0: # LOWER ONE EIGHTH BLOCK
# if 0 < len(candidate_idx) and token.find('_') < 0: # test code
candidate_idx[-1].append(i)
else:
candidate_idx.append([i])
np.random.shuffle(candidate_idx)
mask_lms = []
for idx_set in candidate_idx:
# check if len(mask_lms) exceeds threshold
if len(mask_lms) >= mask_cnt:
break
if len(mask_lms) + len(idx_set) > mask_cnt:
continue
## masking subwords with 15% probability
## mask_cnt is len(subwords) * 0.15
# iter subwords idx
for sub_idx in idx_set:
masked_token = None
### assign value to masked token: [MASK], original token, random token
# 80% of masking candidate are replaced with '[MASK]' token
if np.random.uniform() < 0.8:
masked_token = '[MASK]'
# remainng 20% of masking candidate
else:
# 10% of remaining preserve original token
if np.random.uniform() < 0.5:
masked_token = tokens[sub_idx]
# 10% of ones are replaced with rnadom token
else:
masked_token = np.random.choice(vocab_list)
### replace subword with masked_token value
mask_lms.append({'idx': sub_idx, 'label':tokens[sub_idx]})
tokens[sub_idx] = masked_token
mask_lms = sorted(mask_lms, key=lambda x: x['idx'])
mask_idx = [mask_dict['idx'] for mask_dict in mask_lms]
mask_label = [mask_dict['label'] for mask_dict in mask_lms]
# print(candidate_idx)
# print(mask_lms)
print(mask_idx, mask_label)
return tokens, mask_idx, k_label
def truncate_token(tokenA, tokenB, max_seq):
"""
truncate long sequence
"""
while True:
total_len = len(tokenA) + len(tokenB)
print('max token {}\ntotal_len {} = {} + {}'.format(max_seq, total_len, len(tokenA), len(tokenB)))
if total_len <= max_seq:
break
if len(tokenA) > len(tokenB):
tokenA.pop()
else:
tokenB.pop()
def create_pretrain_instances(paragraph_ls, paragraph_idx, paragraph, n_seq, mask_prob, vocab_list):
"""
create NSP train set
"""
# 3 special token: [CLS], [SEP] for sent A, [SEP] for sent B
max_seq_len = n_seq - 2 - 1
target_seq_len = max_seq_len # [CLS], segmentA, segmentA, ..., [SEP], segmentB, segmentB, ...
instances = []
temp_sentence = []
temp_sent_seq_length = 0 # num of tokens
max_num_tokens = 256
target_seq_len = np.random.randint(2, max_num_tokens) # min len of tokens
for i, sent in enumerate(paragraph):
## A. not the last sentence of the paragraph
temp_sentence.append(sent)
temp_sent_seq_length += len(sent)
## B. check if it is the last sentence of the paragraph
## or temp_sent_seq_length is longer than or equal to target_seq_len
if i == len(paragraph) - 1 or temp_sent_seq_length >= target_seq_len:
if temp_sentence:
## A. sentence A segment: from 0 to a_end
a_end = 1
if len(temp_sentence) != 1:
a_end = np.random.randint(1, len(temp_sentence))
# append the sentences to tokenA
# from the front to the back
tokenA = []
for _, s in enumerate(temp_sentence[:a_end]):
tokenA.extend(s)
## B. sentence B segment
tokenB = []
# A. Actual next
# is_next will be the label for NSP pretrain
if len(temp_sentence) > 1 and np.random.uniform() >= 0.5:
is_next = True
for j in range(a_end, len(temp_sentence)):
tokenB.extend(temp_sentence[j])
# B. random next
else:
is_next = False
tokenB_len = target_seq_len - len(tokenA)
random_para_idx = para_idx
while para_idx == random_para_idx:
random_para_idx = np.random.randint(0, len(paragraph_ls))
random_para = paragraph[random_para_idx]
random_start = np.random.randint(0, len(random_para))
for j in range(random_start, len(random_para)):
tokenB.extend(random_para[j])
truncate_token(tokenA, tokenB, max_seq)
assert 0 < len(tokenA)
assert 0 < len(tokenB)
tokens = ["[CLS]"] + tokenA + ["[SEP]"] + tokenB + ["[SEP]"]
segment = [0]*(len(tokenA) + 2) + [1]*(len(tokenB) + 1)
tokens, mask_idx, mask_label = \
create_pretrain_mask(tokens, int((len(tokens)-3)*mask_prob), vocab_list)
instance = {
'tokens': tokens,
'segment': segment,
'is_next': is_next,
'mask_idx': mask_idx,
'mask_label': mask_label
}
instances.append(instance)
# reset segment candidate
temp_sentence = []
temp_sent_seq_length = 0
return instances
def make_pretrain_data(vocab, in_file, out_file, count, n_seq, mask_prob):
"""
read text and return train data set format
"""
vocab_list = []
for id_ in range(vocab.get_piece_size()):
if not vocab.is_unknown(id_):
vocab_list.append(vocab.id_to_piece(id_))
paragraph_ls = []
with open(in_file, 'r') as in_f:
paragraph = []
for i, sent in enumerate(in_f):
sent = sent.strip()
## blank means end of the paragraph
if sent == '':
# if not the beggining of the paragraph
# it is the end of the paragraph
if 0 < len(paragraph):
paragraph_ls.append(paragraph)
paragraph = [] # generate new paragraph list
# check if exceeding 100 thaousand paragraphs
if 1e+5 < len(paragraph_ls):
break
## subwords in list is part of semantic token
# eg. ['▁지','미','▁카','터']
else:
pieces = vocab.encode_as_pieces(sent)
if 0 < len(pieces):
paragraph.append(pieces)
if paragraph:
paragraph_ls.append(paragraph)
# masking def: create_pretrain_mask
for index in range(count):
output = out_file.format(index)
# if os.path.isfile(output):
# continue
with open(output, 'w') as out_f:
for i, paragraph in enumerate(paragraph_ls):
masking_info = create_pretrain_instances(paragraph_ls, i, paragraph, n_seq, mask_prob, vocab_list)
for elem in masking_info:
out_f.write(json.dumps(elem))
out_f.write('\n')
class PretrainDataset(Dataset):
"""
eg. instance
{tokens:
['[CLS]', '▁지', ', '대학교', '를', '▁졸업', '하였다', '.', '▁그', '▁후', ...],
segment:
[0, 0, 0, 0, 0, 0, ..., 1, 1, 1],
is_next: True,
mask_idx:
[16, 21, ..., 41],
mask_label:
['▁192', '▁1', '일', '▁~', '는', ..., '▁조지', '법을']}
"""
def pretrain_collate_fn(inputs):
"""
padding batch
"""
labels_cls, labels_lm, inputs, segments = list(zip(*inputs))
labels_lm = torch.nn.utils.rnn.pad_sequence(labels_lm, batch_first=True, padding_value=-1)
inputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=0)
segments = torch.nn.utils.rnn.pad_sequence(segments, batch_first=True, padding_value=0)
batch = [
torch.stack(labels_cls, dim=0),
labels_lm,
inputs,
segments,
]
return batch | 38.013605 | 114 | 0.548676 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import json
def create_pretrain_mask(tokens, mask_cnt, vocab_list):
"""
masking subwords(15% of entire subwords)
- mask_cnt: len(subwords) * 0.15
- [MASK]: 80% of masking candidate token
- original token: 10% of masking candidate token
- another token: 10% of masking candidate token
"""
candidate_idx = []
## subwords in the same list augment a sementic word
## eg. [[0], [1], [2], [4, 5]] -> token_idx 4 + 5 is semantic word
# A list represent a sementic word
for i, token in enumerate(tokens):
if token == '[CLS]' or token == '[SEP]':
continue
if 0 < len(candidate_idx) and token.find(u'\u2581') < 0: # LOWER ONE EIGHTH BLOCK
# if 0 < len(candidate_idx) and token.find('_') < 0: # test code
candidate_idx[-1].append(i)
else:
candidate_idx.append([i])
np.random.shuffle(candidate_idx)
mask_lms = []
for idx_set in candidate_idx:
# check if len(mask_lms) exceeds threshold
if len(mask_lms) >= mask_cnt:
break
if len(mask_lms) + len(idx_set) > mask_cnt:
continue
## masking subwords with 15% probability
## mask_cnt is len(subwords) * 0.15
# iter subwords idx
for sub_idx in idx_set:
masked_token = None
### assign value to masked token: [MASK], original token, random token
# 80% of masking candidate are replaced with '[MASK]' token
if np.random.uniform() < 0.8:
masked_token = '[MASK]'
# remainng 20% of masking candidate
else:
# 10% of remaining preserve original token
if np.random.uniform() < 0.5:
masked_token = tokens[sub_idx]
# 10% of ones are replaced with rnadom token
else:
masked_token = np.random.choice(vocab_list)
### replace subword with masked_token value
mask_lms.append({'idx': sub_idx, 'label':tokens[sub_idx]})
tokens[sub_idx] = masked_token
mask_lms = sorted(mask_lms, key=lambda x: x['idx'])
mask_idx = [mask_dict['idx'] for mask_dict in mask_lms]
mask_label = [mask_dict['label'] for mask_dict in mask_lms]
# print(candidate_idx)
# print(mask_lms)
print(mask_idx, mask_label)
return tokens, mask_idx, k_label
def truncate_token(tokenA, tokenB, max_seq):
"""
truncate long sequence
"""
while True:
total_len = len(tokenA) + len(tokenB)
print('max token {}\ntotal_len {} = {} + {}'.format(max_seq, total_len, len(tokenA), len(tokenB)))
if total_len <= max_seq:
break
if len(tokenA) > len(tokenB):
tokenA.pop()
else:
tokenB.pop()
def create_pretrain_instances(paragraph_ls, paragraph_idx, paragraph, n_seq, mask_prob, vocab_list):
"""
create NSP train set
"""
# 3 special token: [CLS], [SEP] for sent A, [SEP] for sent B
max_seq_len = n_seq - 2 - 1
target_seq_len = max_seq_len # [CLS], segmentA, segmentA, ..., [SEP], segmentB, segmentB, ...
instances = []
temp_sentence = []
temp_sent_seq_length = 0 # num of tokens
max_num_tokens = 256
target_seq_len = np.random.randint(2, max_num_tokens) # min len of tokens
for i, sent in enumerate(paragraph):
## A. not the last sentence of the paragraph
temp_sentence.append(sent)
temp_sent_seq_length += len(sent)
## B. check if it is the last sentence of the paragraph
## or temp_sent_seq_length is longer than or equal to target_seq_len
if i == len(paragraph) - 1 or temp_sent_seq_length >= target_seq_len:
if temp_sentence:
## A. sentence A segment: from 0 to a_end
a_end = 1
if len(temp_sentence) != 1:
a_end = np.random.randint(1, len(temp_sentence))
# append the sentences to tokenA
# from the front to the back
tokenA = []
for _, s in enumerate(temp_sentence[:a_end]):
tokenA.extend(s)
## B. sentence B segment
tokenB = []
# A. Actual next
# is_next will be the label for NSP pretrain
if len(temp_sentence) > 1 and np.random.uniform() >= 0.5:
is_next = True
for j in range(a_end, len(temp_sentence)):
tokenB.extend(temp_sentence[j])
# B. random next
else:
is_next = False
tokenB_len = target_seq_len - len(tokenA)
random_para_idx = para_idx
while para_idx == random_para_idx:
random_para_idx = np.random.randint(0, len(paragraph_ls))
random_para = paragraph[random_para_idx]
random_start = np.random.randint(0, len(random_para))
for j in range(random_start, len(random_para)):
tokenB.extend(random_para[j])
truncate_token(tokenA, tokenB, max_seq)
assert 0 < len(tokenA)
assert 0 < len(tokenB)
tokens = ["[CLS]"] + tokenA + ["[SEP]"] + tokenB + ["[SEP]"]
segment = [0]*(len(tokenA) + 2) + [1]*(len(tokenB) + 1)
tokens, mask_idx, mask_label = \
create_pretrain_mask(tokens, int((len(tokens)-3)*mask_prob), vocab_list)
instance = {
'tokens': tokens,
'segment': segment,
'is_next': is_next,
'mask_idx': mask_idx,
'mask_label': mask_label
}
instances.append(instance)
# reset segment candidate
temp_sentence = []
temp_sent_seq_length = 0
return instances
def make_pretrain_data(vocab, in_file, out_file, count, n_seq, mask_prob):
"""
read text and return train data set format
"""
vocab_list = []
for id_ in range(vocab.get_piece_size()):
if not vocab.is_unknown(id_):
vocab_list.append(vocab.id_to_piece(id_))
paragraph_ls = []
with open(in_file, 'r') as in_f:
paragraph = []
for i, sent in enumerate(in_f):
sent = sent.strip()
## blank means end of the paragraph
if sent == '':
# if not the beggining of the paragraph
# it is the end of the paragraph
if 0 < len(paragraph):
paragraph_ls.append(paragraph)
paragraph = [] # generate new paragraph list
# check if exceeding 100 thaousand paragraphs
if 1e+5 < len(paragraph_ls):
break
## subwords in list is part of semantic token
# eg. ['▁지','미','▁카','터']
else:
pieces = vocab.encode_as_pieces(sent)
if 0 < len(pieces):
paragraph.append(pieces)
if paragraph:
paragraph_ls.append(paragraph)
# masking def: create_pretrain_mask
for index in range(count):
output = out_file.format(index)
# if os.path.isfile(output):
# continue
with open(output, 'w') as out_f:
for i, paragraph in enumerate(paragraph_ls):
masking_info = create_pretrain_instances(paragraph_ls, i, paragraph, n_seq, mask_prob, vocab_list)
for elem in masking_info:
out_f.write(json.dumps(elem))
out_f.write('\n')
class PretrainDataset(Dataset):
"""
eg. instance
{tokens:
['[CLS]', '▁지', ', '대학교', '를', '▁졸업', '하였다', '.', '▁그', '▁후', ...],
segment:
[0, 0, 0, 0, 0, 0, ..., 1, 1, 1],
is_next: True,
mask_idx:
[16, 21, ..., 41],
mask_label:
['▁192', '▁1', '일', '▁~', '는', ..., '▁조지', '법을']}
"""
def __init__(self, vocab, infile):
self.vocab = vocab
self.labels_cls = []
self.label_lm_ls = []
self.sentence_ls = []
self.segments = []
with open(infile, 'r') as f:
for i, line in enumerate(f):
instance = json.loads(line)
self.labels_cls.append(instance['is_next'])
sentence = [vocab.piece_to_id(p) for p in instance['tokens']]
self.sentence_ls.append(sentence)
self.segments.append(instance['segment'])
mask_idx = np.array(instance['mask_idx'], dtype=np.int)
mask_label = np.array([vocab.piece_to_id(p) for p in instance['mask_label']], dtype=np.int)
label_lm = np.full(len(sentence), dtype=np.int, fill_value=-1)
label_lm[mask_idx] = mask_label
self.label_lm_ls.append(label_lm)
def __len__(self):
assert len(self.labels_cls) == len(self.label_lm_ls)
assert len(self.labels_cls) == len(self.sentence_ls)
assert len(self.labels_cls) == len(self.segments)
return len(self.labels_cls)
def __getitem__(self, idx):
return (torch.tensor(self.labels_cls[idx]),
torch.tensor(self.label_lm_ls[idx]),
torch.tensor(self.sentence_ls[idx]),
torch.tensor(self.segments[idx]),)
def pretrain_collate_fn(inputs):
"""
padding batch
"""
labels_cls, labels_lm, inputs, segments = list(zip(*inputs))
labels_lm = torch.nn.utils.rnn.pad_sequence(labels_lm, batch_first=True, padding_value=-1)
inputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=0)
segments = torch.nn.utils.rnn.pad_sequence(segments, batch_first=True, padding_value=0)
batch = [
torch.stack(labels_cls, dim=0),
labels_lm,
inputs,
segments,
]
return batch
def train_epoch(config, epoch, model, criterion_lm, criterion_cls, optimizer, train_loader):
loss_ls = []
model.train()
print('model train')
for i, value in enumerate(train_loader):
labels_cls, labels_lm, inputs, segments = map(lambda x: x.to(config.device), value)
optimizer.zero_grad()
outputs = model(inputs, segments)
logits_cls, logits_lm = outputs[0], outputs[1]
loss_cls = criterion_cls(logits_cls, labels_cls)
loss_lm = criterion_lm(logits_lm.view(-1, logits_lm.size(2)), labels_lm.view(-1))
loss = loss_cls + loss_lm
loss_val = loss_lm.item()
loss_ls.append(loss_val)
loss.backward()
optimizer.step()
return np.mean(loss_ls) | 2,113 | 0 | 111 |
bda34656d83aebce4f85163d20f15b0987373b21 | 735 | py | Python | covidscholar_web/scraping/journal_prediction.py | COVID-19-Text-Mining/website | d0314290d61431ddf694d64d96fb15fc872110cd | [
"MIT"
] | null | null | null | covidscholar_web/scraping/journal_prediction.py | COVID-19-Text-Mining/website | d0314290d61431ddf694d64d96fb15fc872110cd | [
"MIT"
] | null | null | null | covidscholar_web/scraping/journal_prediction.py | COVID-19-Text-Mining/website | d0314290d61431ddf694d64d96fb15fc872110cd | [
"MIT"
] | null | null | null | from matscholar import Rester
import bson
import tqdm
import os
import pymongo
client = pymongo.MongoClient('mongodb+srv://%s:%s@matstract-kve41.mongodb.net/test:27017' %
(os.getenv('ATLAS_USER_RW'), os.getenv('ATLAS_USER_PASSWORD_RW')), authSource='admin')
db = client['matstract_db']
c = db.MRS_abstracts
LIMIT = 0
rester = Rester()
print(c.count_documents({}, limit=5))
for d in tqdm.tqdm(c.find({}, limit=LIMIT)):
id = bson.ObjectId(d["_id"])
suggestions = rester.get_journal_suggestion(abstract=d["abstract"])
# print(d)
c.update({"_id": id}, {"$set": {"journal_suggestions": suggestions}})
# print(d["abstract"])
# print(suggestions)
# print("-----------\n\n\n\n")
| 26.25 | 115 | 0.647619 | from matscholar import Rester
import bson
import tqdm
import os
import pymongo
client = pymongo.MongoClient('mongodb+srv://%s:%s@matstract-kve41.mongodb.net/test:27017' %
(os.getenv('ATLAS_USER_RW'), os.getenv('ATLAS_USER_PASSWORD_RW')), authSource='admin')
db = client['matstract_db']
c = db.MRS_abstracts
LIMIT = 0
rester = Rester()
print(c.count_documents({}, limit=5))
for d in tqdm.tqdm(c.find({}, limit=LIMIT)):
id = bson.ObjectId(d["_id"])
suggestions = rester.get_journal_suggestion(abstract=d["abstract"])
# print(d)
c.update({"_id": id}, {"$set": {"journal_suggestions": suggestions}})
# print(d["abstract"])
# print(suggestions)
# print("-----------\n\n\n\n")
| 0 | 0 | 0 |
637ffeeae8697f699c352d40c502c391e37f239e | 6,353 | py | Python | opensnips/old/docker-images/rasa/snips_services/rasa_snips_extensions.py | syntithenai/opensnips | dd3dc629082ab8400da6fcdbf7d4ad5baf877848 | [
"BSD-2-Clause"
] | 57 | 2017-12-28T22:50:20.000Z | 2022-01-25T16:05:36.000Z | opensnips/old/docker-images/rasa/snips_services/rasa_snips_extensions.py | syntithenai/opensnips | dd3dc629082ab8400da6fcdbf7d4ad5baf877848 | [
"BSD-2-Clause"
] | 28 | 2018-04-18T06:45:20.000Z | 2022-03-08T22:50:50.000Z | opensnips/old/docker-images/rasa/snips_services/rasa_snips_extensions.py | syntithenai/opensnips | dd3dc629082ab8400da6fcdbf7d4ad5baf877848 | [
"BSD-2-Clause"
] | 18 | 2017-12-27T01:57:14.000Z | 2021-03-02T14:13:06.000Z | #!/usr/local/bin/python
# -*-: coding utf-8 -*-
""" Snips core and nlu server. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.agent import Agent
import os
import os.path
import re
from rasa_core.domain import TemplateDomain
from rasa_core.featurizers import Featurizer
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.policies.ensemble import PolicyEnsemble
from rasa_core.utils import read_yaml_file
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_nlu.utils.md_to_json import MarkdownToJson
from rasa_nlu.utils.md_to_json import comment_regex,synonym_regex,intent_regex,INTENT_PARSING_STATE,SYNONYM_PARSING_STATE
# Customised Agent class to use custom SnipsDomain and pass core server through to the Domain for scope access
# Customised Domain to allow reference to core server for access to sessionId and other server scope.
| 43.217687 | 186 | 0.668031 | #!/usr/local/bin/python
# -*-: coding utf-8 -*-
""" Snips core and nlu server. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core.agent import Agent
import os
import os.path
import re
from rasa_core.domain import TemplateDomain
from rasa_core.featurizers import Featurizer
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.policies.ensemble import PolicyEnsemble
from rasa_core.utils import read_yaml_file
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_nlu.utils.md_to_json import MarkdownToJson
from rasa_nlu.utils.md_to_json import comment_regex,synonym_regex,intent_regex,INTENT_PARSING_STATE,SYNONYM_PARSING_STATE
# Customised Agent class to use custom SnipsDomain and pass core server through to the Domain for scope access
class SnipsMqttAgent(Agent):
@staticmethod
# for training
# tracker_store=None,,core_server=None
def createAgent(path, interpreter=None, featurizer = None,policies =[MemoizationPolicy(), KerasPolicy()],action_factory = 'snips_factory.snips_action_factory', tracker_store = None):
print ('CRETE AGENT {}'.format(path))
# type: (Text, Any, Optional[TrackerStore]) -> Agent
if path is None:
raise ValueError("No domain path specified.")
domain = SnipsDomain.load(os.path.join(path, "domain.yml"),action_factory)
# ensures the domain hasn't changed between test and train
#domain.compare_with_specification(path)
#featurizer = self._create_featurizer(featurizer)
#ensemble = self._create_ensemble(policies)
#_interpreter = NaturalLanguageInterpreter.create(interpreter)
#_tracker_store = None #SnipsMqttAgent.create_tracker_store(tracker_store, domain)
print("CREATED SNIPS AGENT")
return SnipsMqttAgent(domain, policies, featurizer, interpreter, tracker_store)
@staticmethod
# for lookup
def loadAgent(path, interpreter=None, tracker_store=None,action_factory=None,core_server=None):
# type: (Text, Any, Optional[TrackerStore]) -> Agent
if path is None:
raise ValueError("No domain path specified.")
domain = SnipsDomain.load(os.path.join(path, "domain.yml"),action_factory,core_server)
# ensures the domain hasn't changed between test and train
domain.compare_with_specification(path)
featurizer = Featurizer.load(path)
ensemble = PolicyEnsemble.load(path, featurizer)
_interpreter = NaturalLanguageInterpreter.create(interpreter)
_tracker_store = SnipsMqttAgent.create_tracker_store(tracker_store, domain)
print("CREATED SNIPS AGENT")
return SnipsMqttAgent(domain, ensemble, featurizer, _interpreter, _tracker_store)
# Customised Domain to allow reference to core server for access to sessionId and other server scope.
class SnipsDomain(TemplateDomain):
def __init__(self, intents, entities, slots, templates, action_classes,
action_names, action_factory, topics, core_server = None, **kwargs):
self._intents = intents
self._entities = entities
self._slots = slots
self._templates = templates
self._action_classes = action_classes
self._action_names = action_names
self._factory_name = action_factory
self.core_server = core_server
self._actions = self.instantiate_actions(
action_factory, action_classes, action_names, templates)
print("CREATED SNIPS DOMAIN")
super(TemplateDomain, self).__init__(topics, **kwargs)
@classmethod
def load(cls, filename, action_factory=None,core_server=None):
if not os.path.isfile(filename):
raise Exception(
"Failed to load domain specification from '{}'. "
"File not found!".format(os.path.abspath(filename)))
cls.validate_domain_yaml(filename)
data = read_yaml_file(filename)
utter_templates = cls.collect_templates(data.get("templates", {}))
if not action_factory:
action_factory = data.get("action_factory", None)
topics = [Topic(name) for name in data.get("topics", [])]
slots = cls.collect_slots(data.get("slots", {}))
additional_arguments = data.get("config", {})
print("LOADED SNIPS DOMAIN")
return SnipsDomain(
data.get("intents", []),
data.get("entities", []),
slots,
utter_templates,
data.get("actions", []),
data.get("action_names", []),
action_factory,
topics,
core_server,
**additional_arguments
)
class SnipsMarkdownToJson(MarkdownToJson):
def __init__(self, markdown):
self.markdown = markdown
# set when parsing examples from a given intent
self.current_intent = None
self.common_examples = []
self.entity_synonyms = []
self.interpret(markdown)
def interpret(self,markdown):
"""Parse the content of the actual .md file."""
from rasa_nlu.utils.md_to_json import strip_comments
f_com_rmved = strip_comments(comment_regex,self.markdown)# Strip comments
for row in f_com_rmved:
# Remove white-space which may have crept in due to comments
row = row.strip()
intent_match = re.search(intent_regex, row)
if intent_match is not None:
self._set_current_state(
INTENT_PARSING_STATE, intent_match.group(1))
continue
synonym_match = re.search(synonym_regex, row)
if synonym_match is not None:
self._set_current_state(
SYNONYM_PARSING_STATE, synonym_match.group(1))
continue
print("PARSE NLU ROW {}".format(row))
self._parse_intent_or_synonym_example(row)
return {
"rasa_nlu_data": {
"common_examples": self.common_examples,
"entity_synonyms": self.entity_synonyms
}
}
| 3,758 | 1,446 | 75 |
e5cfb5b96c3a2c5050fcd36081bb94d65aafd6f9 | 6,407 | py | Python | loldib/getratings/models/NA/na_brand/na_brand_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_brand/na_brand_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_brand/na_brand_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
| 15.364508 | 46 | 0.761667 | from getratings.models.ratings import Ratings
class NA_Brand_Mid_Aatrox(Ratings):
pass
class NA_Brand_Mid_Ahri(Ratings):
pass
class NA_Brand_Mid_Akali(Ratings):
pass
class NA_Brand_Mid_Alistar(Ratings):
pass
class NA_Brand_Mid_Amumu(Ratings):
pass
class NA_Brand_Mid_Anivia(Ratings):
pass
class NA_Brand_Mid_Annie(Ratings):
pass
class NA_Brand_Mid_Ashe(Ratings):
pass
class NA_Brand_Mid_AurelionSol(Ratings):
pass
class NA_Brand_Mid_Azir(Ratings):
pass
class NA_Brand_Mid_Bard(Ratings):
pass
class NA_Brand_Mid_Blitzcrank(Ratings):
pass
class NA_Brand_Mid_Brand(Ratings):
pass
class NA_Brand_Mid_Braum(Ratings):
pass
class NA_Brand_Mid_Caitlyn(Ratings):
pass
class NA_Brand_Mid_Camille(Ratings):
pass
class NA_Brand_Mid_Cassiopeia(Ratings):
pass
class NA_Brand_Mid_Chogath(Ratings):
pass
class NA_Brand_Mid_Corki(Ratings):
pass
class NA_Brand_Mid_Darius(Ratings):
pass
class NA_Brand_Mid_Diana(Ratings):
pass
class NA_Brand_Mid_Draven(Ratings):
pass
class NA_Brand_Mid_DrMundo(Ratings):
pass
class NA_Brand_Mid_Ekko(Ratings):
pass
class NA_Brand_Mid_Elise(Ratings):
pass
class NA_Brand_Mid_Evelynn(Ratings):
pass
class NA_Brand_Mid_Ezreal(Ratings):
pass
class NA_Brand_Mid_Fiddlesticks(Ratings):
pass
class NA_Brand_Mid_Fiora(Ratings):
pass
class NA_Brand_Mid_Fizz(Ratings):
pass
class NA_Brand_Mid_Galio(Ratings):
pass
class NA_Brand_Mid_Gangplank(Ratings):
pass
class NA_Brand_Mid_Garen(Ratings):
pass
class NA_Brand_Mid_Gnar(Ratings):
pass
class NA_Brand_Mid_Gragas(Ratings):
pass
class NA_Brand_Mid_Graves(Ratings):
pass
class NA_Brand_Mid_Hecarim(Ratings):
pass
class NA_Brand_Mid_Heimerdinger(Ratings):
pass
class NA_Brand_Mid_Illaoi(Ratings):
pass
class NA_Brand_Mid_Irelia(Ratings):
pass
class NA_Brand_Mid_Ivern(Ratings):
pass
class NA_Brand_Mid_Janna(Ratings):
pass
class NA_Brand_Mid_JarvanIV(Ratings):
pass
class NA_Brand_Mid_Jax(Ratings):
pass
class NA_Brand_Mid_Jayce(Ratings):
pass
class NA_Brand_Mid_Jhin(Ratings):
pass
class NA_Brand_Mid_Jinx(Ratings):
pass
class NA_Brand_Mid_Kalista(Ratings):
pass
class NA_Brand_Mid_Karma(Ratings):
pass
class NA_Brand_Mid_Karthus(Ratings):
pass
class NA_Brand_Mid_Kassadin(Ratings):
pass
class NA_Brand_Mid_Katarina(Ratings):
pass
class NA_Brand_Mid_Kayle(Ratings):
pass
class NA_Brand_Mid_Kayn(Ratings):
pass
class NA_Brand_Mid_Kennen(Ratings):
pass
class NA_Brand_Mid_Khazix(Ratings):
pass
class NA_Brand_Mid_Kindred(Ratings):
pass
class NA_Brand_Mid_Kled(Ratings):
pass
class NA_Brand_Mid_KogMaw(Ratings):
pass
class NA_Brand_Mid_Leblanc(Ratings):
pass
class NA_Brand_Mid_LeeSin(Ratings):
pass
class NA_Brand_Mid_Leona(Ratings):
pass
class NA_Brand_Mid_Lissandra(Ratings):
pass
class NA_Brand_Mid_Lucian(Ratings):
pass
class NA_Brand_Mid_Lulu(Ratings):
pass
class NA_Brand_Mid_Lux(Ratings):
pass
class NA_Brand_Mid_Malphite(Ratings):
pass
class NA_Brand_Mid_Malzahar(Ratings):
pass
class NA_Brand_Mid_Maokai(Ratings):
pass
class NA_Brand_Mid_MasterYi(Ratings):
pass
class NA_Brand_Mid_MissFortune(Ratings):
pass
class NA_Brand_Mid_MonkeyKing(Ratings):
pass
class NA_Brand_Mid_Mordekaiser(Ratings):
pass
class NA_Brand_Mid_Morgana(Ratings):
pass
class NA_Brand_Mid_Nami(Ratings):
pass
class NA_Brand_Mid_Nasus(Ratings):
pass
class NA_Brand_Mid_Nautilus(Ratings):
pass
class NA_Brand_Mid_Nidalee(Ratings):
pass
class NA_Brand_Mid_Nocturne(Ratings):
pass
class NA_Brand_Mid_Nunu(Ratings):
pass
# Placeholder rating classes for every NA "Brand, Mid" champion match-up.
# Each one is an empty subclass of Ratings distinguished only by its name,
# so they are created in a loop instead of 58 hand-written stubs; the
# resulting module attributes are the same class objects as before.
_NA_BRAND_MID_CHAMPIONS = (
    "Olaf", "Orianna", "Ornn", "Pantheon", "Poppy", "Quinn", "Rakan",
    "Rammus", "RekSai", "Renekton", "Rengar", "Riven", "Rumble", "Ryze",
    "Sejuani", "Shaco", "Shen", "Shyvana", "Singed", "Sion", "Sivir",
    "Skarner", "Sona", "Soraka", "Swain", "Syndra", "TahmKench", "Taliyah",
    "Talon", "Taric", "Teemo", "Thresh", "Tristana", "Trundle", "Tryndamere",
    "TwistedFate", "Twitch", "Udyr", "Urgot", "Varus", "Vayne", "Veigar",
    "Velkoz", "Vi", "Viktor", "Vladimir", "Volibear", "Warwick", "Xayah",
    "Xerath", "XinZhao", "Yasuo", "Yorick", "Zac", "Zed", "Ziggs", "Zilean",
    "Zyra",
)
for _champion in _NA_BRAND_MID_CHAMPIONS:
    _class_name = "NA_Brand_Mid_%s" % _champion
    # type(name, bases, dict) builds the same empty subclass that a
    # `class NA_Brand_Mid_X(Ratings): pass` statement would.
    globals()[_class_name] = type(_class_name, (Ratings,), {})
del _champion, _class_name
| 0 | 2,908 | 3,450 |
5d0af14cacf81bc84d5e76077f5732b56de324b1 | 4,688 | py | Python | mainBIDIRLSTM.py | SqrtPapere/SentimentAnalysis_CNN | e802780c8f7b6747832cb53b9a4391e9494c73a7 | [
"MIT"
] | 17 | 2018-02-02T14:09:23.000Z | 2020-08-06T21:02:49.000Z | mainBIDIRLSTM.py | SqrtPapere/SentimentAnalysis_CNN | e802780c8f7b6747832cb53b9a4391e9494c73a7 | [
"MIT"
] | null | null | null | mainBIDIRLSTM.py | SqrtPapere/SentimentAnalysis_CNN | e802780c8f7b6747832cb53b9a4391e9494c73a7 | [
"MIT"
] | 1 | 2018-09-10T21:33:14.000Z | 2018-09-10T21:33:14.000Z |
# https://machinelearningmastery.com/predict-sentiment-movie-reviews-using-deep-learning/
import os
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
from keras.layers import Embedding, Conv1D, MaxPooling1D, Flatten, Dense, Input, Dropout
from keras.models import Model
import matplotlib.pyplot as plt
from keras.layers import LSTM, Bidirectional
import pickle
do_early_stopping = True
# top words to be considered in Tokenizer
NUM_WORDS = 20000
# Length of phrases for padding if shorter or cropping if longer
MAX_SEQUENCE_LENGTH = 500
EMBEDDING_DIM = 300


def plotting(history):
    """Plot train/validation accuracy and loss curves from a Keras History.

    BUGFIX: plotting() is called at the end of this script but was never
    defined in this file, so every run ended with a NameError right after
    training finished.  The definition is restored here.
    """
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()


# preparing train-set from text data
train_text = np.load('Res/train_text.npy')
train_label = np.load('Res/train_label.npy')
print('TrainSet is composed of %s texts.' % len(train_text))
# preparing test-set from text data
test_text = np.load('Res/test_text.npy')
test_label = np.load('Res/test_label.npy')
print('TestSet is composed of %s texts.' % len(test_text))
# Formatting text samples and labels in tensors.
with open('Res/tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
# texts_to_sequences lowercases, strips punctuation and maps each word to an
# integer id, so every text becomes a list of ints.
train_sequences = tokenizer.texts_to_sequences(train_text)
test_sequences = tokenizer.texts_to_sequences(test_text)
word_index = tokenizer.word_index  # dictionary mapping words (str) to their index starting from 0 (int)
print('Found %s unique tokens.' % len(word_index))
train_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)  # each element of sequences is cropped or padded to reach maxlen
test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
train_label = np.asarray(train_label)
test_label = np.asarray(test_label)
print('Shape of data tensor:', train_data.shape)
# shuffle dataset
indices = np.arange(train_data.shape[0])
np.random.shuffle(indices)
train_data = train_data[indices]
train_label = train_label[indices]
# split the data into a training set and a validation set
num_validation_samples = int(0.1 * train_data.shape[0])
x_train = train_data[:-num_validation_samples]
y_train = train_label[:-num_validation_samples]
x_val = train_data[-num_validation_samples:]
y_val = train_label[-num_validation_samples:]
x_test = test_data
y_test = test_label
embedding_matrix = np.load('Res/embedding_matrix.npy')
# The Embedding layer maps the integer ids to the vectors stored at the
# matching rows of embedding_matrix, producing a 3D tensor of shape
# (samples, sequence_length, embedding_dim).
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedding_layer = Embedding(len(word_index)+1, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)
x = embedding_layer(sequence_input)
x = Dropout(0.3)(x)
x = Bidirectional(LSTM(100))(x)
x = Dropout(0.3)(x)
prob = Dense(1, activation='sigmoid')(x)
model = Model(sequence_input, prob)
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
tensorboard = TensorBoard(log_dir='./GraphLSTM', histogram_freq=0, write_graph=True)
print('model compiled')
print(model.summary())
early_stopping = EarlyStopping(monitor='val_loss', patience = 2, mode = 'min')
# NOTE(review): `cp` is created but never passed to model.fit, so no
# checkpoints are written; add it to the `callbacks` list if wanted.
cp = ModelCheckpoint('ModelBLSTM.h5', monitor='val_acc', save_best_only=True, mode='max')
if do_early_stopping:
    print('using early stopping strategy')
    history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=4, batch_size=128, callbacks = [early_stopping, tensorboard])
else:
    history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=8, batch_size=128)
loss, acc = model.evaluate(x_test, y_test)
print("loss: "+str(loss))
print("accuracy: "+str(acc))
model.save('my_model3.h5')
plotting(history)
| 32.783217 | 337 | 0.760666 |
# https://machinelearningmastery.com/predict-sentiment-movie-reviews-using-deep-learning/
import os
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
from keras.layers import Embedding, Conv1D, MaxPooling1D, Flatten, Dense, Input, Dropout
from keras.models import Model
import matplotlib.pyplot as plt
from keras.layers import LSTM, Bidirectional
import pickle
def plotting(history):
    """Plot accuracy and loss curves (train vs. validation) from a Keras History.

    Two figures are shown one after the other; each plt.show() blocks until
    its window is closed.  Assumes the model was compiled with
    metrics=['accuracy'] so the history contains 'acc'/'val_acc' keys.
    """
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
do_early_stopping = True
# top words to be considered in Tokenizer
NUM_WORDS = 20000
# Length of phrases for padding if shorter or cropping if longer
MAX_SEQUENCE_LENGTH = 500
EMBEDDING_DIM = 300
# preparing train-set from text data
train_text = np.load('Res/train_text.npy')
train_label = np.load('Res/train_label.npy')
print('TrainSet is composed of %s texts.' % len(train_text))
# preparing test-set from text data
test_text = np.load('Res/test_text.npy')
test_label = np.load('Res/test_label.npy')
print('TestSet is composed of %s texts.' % len(test_text))
# Formatting text samples and labels in tensors.
with open('Res/tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
# texts_to_sequences lowercases, strips punctuation and maps each word to an
# integer id, so every text becomes a list of ints.
train_sequences = tokenizer.texts_to_sequences(train_text)
test_sequences = tokenizer.texts_to_sequences(test_text)
word_index = tokenizer.word_index # dictionary mapping words (str) to their index starting from 0 (int)
print('Found %s unique tokens.' % len(word_index))
train_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH) # each element of sequences is cropped or padded to reach maxlen
test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
train_label = np.asarray(train_label)
test_label = np.asarray(test_label)
print('Shape of data tensor:', train_data.shape)
#shuffle dataset
indices = np.arange(train_data.shape[0])
np.random.shuffle(indices)
train_data = train_data[indices]
train_label = train_label[indices]
# split the data into a training set and a validation set
num_validation_samples = int(0.1 * train_data.shape[0])
x_train = train_data[:-num_validation_samples]
y_train = train_label[:-num_validation_samples]
x_val = train_data[-num_validation_samples:]
y_val = train_label[-num_validation_samples:]
x_test = test_data
y_test = test_label
embedding_matrix = np.load('Res/embedding_matrix.npy')
# The Embedding layer maps the integer ids to the vectors found at the
# corresponding rows of embedding_matrix, so its output is a 3D tensor of
# shape (samples, sequence_length, embedding_dim).
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedding_layer = Embedding(len(word_index)+1, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)
x = embedding_layer(sequence_input)
x = Dropout(0.3)(x)
# Bidirectional LSTM with 100 units per direction over the embedded sequence.
x = Bidirectional(LSTM(100))(x)
x = Dropout(0.3)(x)
prob = Dense(1, activation='sigmoid')(x)
model = Model(sequence_input, prob)
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
tensorboard = TensorBoard(log_dir='./GraphLSTM', histogram_freq=0, write_graph=True)
print('model compiled')
print(model.summary())
early_stopping = EarlyStopping(monitor='val_loss', patience = 2, mode = 'min')
# NOTE(review): `cp` is never passed to model.fit below, so ModelBLSTM.h5 is
# never written; add it to the `callbacks` list to enable checkpointing.
cp = ModelCheckpoint('ModelBLSTM.h5', monitor='val_acc', save_best_only=True, mode='max')
if do_early_stopping:
    print('using early stopping strategy')
    history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=4, batch_size=128, callbacks = [early_stopping, tensorboard])
else:
    history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=8, batch_size=128)
loss, acc = model.evaluate(x_test, y_test)
print("loss: "+str(loss))
print("accuracy: "+str(acc))
model.save('my_model3.h5')
plotting(history)
| 484 | 0 | 23 |
db1ac4b09e1eda87653b1a76cc69bb491c457300 | 602 | py | Python | atools/lib/moieties/multiple_ports/nh.py | dubosese/atools | 9d6f9e08310f3abb62aa6ec9e6003dcf9b87b513 | [
"MIT"
] | null | null | null | atools/lib/moieties/multiple_ports/nh.py | dubosese/atools | 9d6f9e08310f3abb62aa6ec9e6003dcf9b87b513 | [
"MIT"
] | null | null | null | atools/lib/moieties/multiple_ports/nh.py | dubosese/atools | 9d6f9e08310f3abb62aa6ec9e6003dcf9b87b513 | [
"MIT"
] | 3 | 2020-05-11T15:56:03.000Z | 2021-08-19T01:16:26.000Z | import mbuild as mb
class NH(mb.Compound):
    """A nitrogen with a hydrogen and two open ports. """
    # NOTE(review): this copy has no body beyond the docstring, so it relies
    # entirely on mb.Compound defaults -- the geometry/port construction
    # advertised by the docstring is not performed here; confirm against the
    # full implementation of NH.__init__.
if __name__ == '__main__':
    # Quick smoke check: construct the compound when run as a script.
    nh = NH()
| 27.363636 | 76 | 0.518272 | import mbuild as mb
class NH(mb.Compound):
    """A nitrogen with a hydrogen and two open ports. """

    def __init__(self):
        super(NH, self).__init__()
        # Load the N-H fragment geometry shipped next to this module.
        mb.load('nh.pdb', compound=self, relative_to_module=self.__module__)
        # Recenter the compound on particle 0 (presumably the nitrogen --
        # confirm against nh.pdb ordering).
        self.translate(-self[0].pos)
        # Two bonding ports anchored on particle 0, pointing in opposite
        # directions along y; separation is presumably in nm -- confirm with
        # the mbuild Port documentation.
        self.add(mb.Port(anchor=self[0],
                         orientation=[0, 1, 0],
                         separation=0.075), 'up')
        self.add(mb.Port(anchor=self[0],
                         orientation=[0, -1, 0],
                         separation=0.075), 'down')
if __name__ == '__main__':
    # Quick smoke check: construct the compound when run as a script.
    nh = NH()
| 431 | 0 | 26 |
68ba827dc155847247b30954d6c05b08b801fa7d | 1,287 | py | Python | ThreeBotPackages/threebot/blog/package.py | Pishoy/jumpscaleX_threebot | 781e839857fecfa601a31d98d86d304e3a6b3b4e | [
"Apache-2.0"
] | null | null | null | ThreeBotPackages/threebot/blog/package.py | Pishoy/jumpscaleX_threebot | 781e839857fecfa601a31d98d86d304e3a6b3b4e | [
"Apache-2.0"
] | null | null | null | ThreeBotPackages/threebot/blog/package.py | Pishoy/jumpscaleX_threebot | 781e839857fecfa601a31d98d86d304e3a6b3b4e | [
"Apache-2.0"
] | null | null | null | from Jumpscale import j
import os
__version__ = "0.0.1"
| 27.382979 | 65 | 0.609169 | from Jumpscale import j
import os
__version__ = "0.0.1"
class Package(j.baseclasses.threebot_package):
    """Threebot package that builds and serves the sapper-based blog via OpenResty."""

    def _init(self, **kwargs):
        # Branch of the package repository to work from; defaults to "master".
        self.branch = kwargs["package"].branch or "master"
        # NOTE(review): "dev" is forced to "0" immediately before it is read
        # back, so self.DEV is always "0" here.
        os.environ["dev"] = "0"
        self.DEV = os.environ.get("dev")

    def prepare(self):
        # Install node, build/export the sapper blog with npm, then copy the
        # exported static site into this package's html/ directory.
        j.builders.runtimes.nodejs.install()
        prepare_cmd = f"""
        cd {self._dirpath}
        pushd sapper-blog
        export dev={self.DEV}
        npm install
        npm run export
        popd
        cp sapper-blog/__sapper__/export/blog/* html/ -R
        """
        j.sal.process.execute(prepare_cmd)

    def start(self):
        """
        called when the 3bot starts
        :return:
        """
        # Ensure OpenResty is installed and configured, then mount the
        # exported static blog as a single-page-app location under /blog on
        # the HTTPS (port 443) website.
        server = self.openresty
        server.install(reset=False)
        server.configure()
        website = server.get_from_port(443)
        locations = website.locations.get("blogs_locations")
        website_location = locations.locations_spa.new()
        website_location.name = "blog"
        website_location.path_url = "/blog"
        website_location.use_jumpscale_weblibs = False
        fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
        website_location.path_location = fullpath
        locations.configure()
        website.configure()
| 457 | 749 | 23 |
b4bb3da2bc5ad348643bcaf9d44ca34ee09b2c5d | 15,534 | py | Python | instrosetta/interfaces/optomechanics/filter_wheel_pb2_grpc.py | jmosbacher/instrosetta-python | b323ee4d3db0b7d8e22ec731dac521c967e5323d | [
"MIT"
] | null | null | null | instrosetta/interfaces/optomechanics/filter_wheel_pb2_grpc.py | jmosbacher/instrosetta-python | b323ee4d3db0b7d8e22ec731dac521c967e5323d | [
"MIT"
] | null | null | null | instrosetta/interfaces/optomechanics/filter_wheel_pb2_grpc.py | jmosbacher/instrosetta-python | b323ee4d3db0b7d8e22ec731dac521c967e5323d | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from instrosetta.interfaces.optomechanics import filter_wheel_pb2 as instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2
| 57.962687 | 143 | 0.810609 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from instrosetta.interfaces.optomechanics import filter_wheel_pb2 as instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2
class FilterWheelStub(object):
  """Client-side stub for the FilterWheel gRPC service.

  Generated by the gRPC protocol compiler (do not hand-edit the RPC wiring);
  each attribute is a unary-unary callable bound to one service method.
  """
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    self.Initialize = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/Initialize',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeResponse.FromString,
        )
    self.Shutdown = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/Shutdown',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownResponse.FromString,
        )
    self.GetSpeedOptions = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSpeedOptions',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsResponse.FromString,
        )
    self.GetSpeed = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSpeed',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedResponse.FromString,
        )
    self.SetSpeed = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetSpeed',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedResponse.FromString,
        )
    self.GetSensorsOptions = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSensorsOptions',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsResponse.FromString,
        )
    self.GetSensors = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSensors',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsResponse.FromString,
        )
    self.SetSensors = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetSensors',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsResponse.FromString,
        )
    self.GetFilterOptions = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetFilterOptions',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsResponse.FromString,
        )
    self.GetFilter = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetFilter',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterResponse.FromString,
        )
    self.SetFilter = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetFilter',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterResponse.FromString,
        )
    self.GetPositionOptions = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetPositionOptions',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsResponse.FromString,
        )
    self.GetPosition = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetPosition',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionResponse.FromString,
        )
    self.SetPosition = channel.unary_unary(
        '/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetPosition',
        request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionRequest.SerializeToString,
        response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionResponse.FromString,
        )
class FilterWheelServicer(object):
  """Service-side skeleton for the FilterWheel gRPC service.

  Generated by the gRPC protocol compiler.  Subclass and override the RPC
  methods below; each stock implementation answers UNIMPLEMENTED to the
  client and raises NotImplementedError locally.  The leading comment/pass
  pair in each method is a codegen placeholder for the missing .proto docs.
  """
  # missing associated documentation comment in .proto file
  pass

  def Initialize(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def Shutdown(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetSpeedOptions(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetSpeed(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SetSpeed(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetSensorsOptions(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetSensors(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SetSensors(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetFilterOptions(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetFilter(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SetFilter(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetPositionOptions(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def GetPosition(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SetPosition(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_FilterWheelServicer_to_server(servicer, server):
  """Register `servicer`'s FilterWheel RPC handlers on `server` (generated code)."""
  rpc_method_handlers = {
      'Initialize': grpc.unary_unary_rpc_method_handler(
          servicer.Initialize,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeResponse.SerializeToString,
      ),
      'Shutdown': grpc.unary_unary_rpc_method_handler(
          servicer.Shutdown,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownResponse.SerializeToString,
      ),
      'GetSpeedOptions': grpc.unary_unary_rpc_method_handler(
          servicer.GetSpeedOptions,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsResponse.SerializeToString,
      ),
      'GetSpeed': grpc.unary_unary_rpc_method_handler(
          servicer.GetSpeed,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedResponse.SerializeToString,
      ),
      'SetSpeed': grpc.unary_unary_rpc_method_handler(
          servicer.SetSpeed,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedResponse.SerializeToString,
      ),
      'GetSensorsOptions': grpc.unary_unary_rpc_method_handler(
          servicer.GetSensorsOptions,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsResponse.SerializeToString,
      ),
      'GetSensors': grpc.unary_unary_rpc_method_handler(
          servicer.GetSensors,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsResponse.SerializeToString,
      ),
      'SetSensors': grpc.unary_unary_rpc_method_handler(
          servicer.SetSensors,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsResponse.SerializeToString,
      ),
      'GetFilterOptions': grpc.unary_unary_rpc_method_handler(
          servicer.GetFilterOptions,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsResponse.SerializeToString,
      ),
      'GetFilter': grpc.unary_unary_rpc_method_handler(
          servicer.GetFilter,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterResponse.SerializeToString,
      ),
      'SetFilter': grpc.unary_unary_rpc_method_handler(
          servicer.SetFilter,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterResponse.SerializeToString,
      ),
      'GetPositionOptions': grpc.unary_unary_rpc_method_handler(
          servicer.GetPositionOptions,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsResponse.SerializeToString,
      ),
      'GetPosition': grpc.unary_unary_rpc_method_handler(
          servicer.GetPosition,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionResponse.SerializeToString,
      ),
      'SetPosition': grpc.unary_unary_rpc_method_handler(
          servicer.SetPosition,
          request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionRequest.FromString,
          response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| 8,926 | 6,316 | 69 |
fa6cf6b2736d1fc6ebd9c3b07176a84c77cb8f2b | 3,274 | py | Python | Discrete_Structures/Exams/Exam_Ch_05_Sequences/Fibo_Exam.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | null | null | null | Discrete_Structures/Exams/Exam_Ch_05_Sequences/Fibo_Exam.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | null | null | null | Discrete_Structures/Exams/Exam_Ch_05_Sequences/Fibo_Exam.py | DoctorOac/SwosuCsPythonExamples | 07476b9b4ef9a6f8bd68921aef19e8f00183b1e7 | [
"Apache-2.0"
] | null | null | null | print('hello')
"""
Compare the number of operations and the time needed
to compute Fibonacci numbers recursively versus that
needed to compute them iteratively
"""
# recursive work
# Python program to display the Fibonacci sequence
import time
recursive_data = Data_tracker()
number_of_terms = 40
recursive_data.start_time = time.time()
# check if the number of terms is valid
if number_of_terms <= 0:
print("Plese enter a positive integer")
else:
print(f"Fibonacci number for {number_of_terms} terms:")
print(recur_fibo((number_of_terms - 1), recursive_data))
recursive_data.stop_time = time.time()
print('\n\nRECUSIVE DATA')
recursive_data.print_function_data()
# iterative work
# https://www.programiz.com/python-programming/examples/fibonacci-sequence
# Program to display the Fibonacci sequence up to n-th term
iterative_data = Data_tracker()
# first two terms
n1, n2 = 0, 1
count = 0
# check if the number of terms is valid
if number_of_terms <= 0:
print("Please enter a positive integer")
# if there is only one term, return n1
elif number_of_terms == 1:
print("Fibonacci sequence upto",number_of_terms,":")
print(n1)
# generate fibonacci sequence
else:
print("Fibonacci sequence:")
iterative_data.start_time = time.time()
while count < number_of_terms:
iterative_data.increment_if_count()
#print(n1)
iterative_data.increment_add_count()
nth = n1 + n2
# update values
iterative_data.increment_assignment_count()
n1 = n2
iterative_data.increment_assignment_count()
n2 = nth
iterative_data.increment_assignment_count()
count += 1
iterative_data.stop_time = time.time()
print('\n\nITERATIVE DATA')
iterative_data.print_function_data()
| 27.745763 | 87 | 0.703726 | print('hello')
"""
Compare the number of operations and the time needed
to compute Fibonacci numbers recursively versus that
needed to compute them iteratively
"""
class Data_tracker:
    """Accumulates operation counts and wall-clock timing for a benchmark run."""

    def __init__(self):
        self.data = []
        self.number_of_times_function_called = 0
        self.if_count = 0
        self.add_count = 0
        self.subtract_count = 0
        self.start_time = 0
        self.stop_time = 0
        self.assignment_count = 0

    def _tick(self, attribute):
        # Generic bump shared by all the public increment_* wrappers.
        setattr(self, attribute, getattr(self, attribute) + 1)

    def increment_assignment_count(self):
        self._tick('assignment_count')

    def increment_function_call_count(self):
        self._tick('number_of_times_function_called')

    def increment_if_count(self):
        self._tick('if_count')

    def increment_add_count(self):
        self._tick('add_count')

    def increment_subtract_count(self):
        self._tick('subtract_count')

    def print_function_data(self):
        """Print every counter plus the elapsed wall-clock time."""
        elapsed = self.stop_time - self.start_time
        print(f'we called this function {self.number_of_times_function_called} times.')
        print(f'we added {self.add_count} times.')
        print(f'we subtracted {self.subtract_count} times.')
        print(f'we did a if statement {self.if_count} times.')
        print(f'we did an assignment operation {self.assignment_count} times.')
        print("--- %s seconds ---" % elapsed)
# recursive work
# Python program to display the Fibonacci sequence
def recur_fibo(n, recursive_data):
    """Naive doubly-recursive Fibonacci of n, tallying operations as it goes.

    Every invocation records one function call and one comparison on
    recursive_data; non-base cases additionally record one addition and the
    two subtractions used to form the recursive arguments.
    """
    recursive_data.increment_function_call_count()
    recursive_data.increment_if_count()
    if n <= 1:
        return n
    recursive_data.increment_add_count()
    recursive_data.increment_subtract_count()
    recursive_data.increment_subtract_count()
    return recur_fibo(n - 1, recursive_data) + recur_fibo(n - 2, recursive_data)
# Time the naive recursive Fibonacci while counting its operations.
import time
recursive_data = Data_tracker()
number_of_terms = 40
recursive_data.start_time = time.time()
# check if the number of terms is valid
if number_of_terms <= 0:
    print("Please enter a positive integer")  # BUGFIX: was misspelled "Plese"
else:
    print(f"Fibonacci number for {number_of_terms} terms:")
    print(recur_fibo((number_of_terms - 1), recursive_data))
recursive_data.stop_time = time.time()
print('\n\nRECURSIVE DATA')  # BUGFIX: was misspelled "RECUSIVE"
recursive_data.print_function_data()
# iterative work
# https://www.programiz.com/python-programming/examples/fibonacci-sequence
# Program to display the Fibonacci sequence up to n-th term
iterative_data = Data_tracker()
# first two terms
n1, n2 = 0, 1
count = 0
# check if the number of terms is valid
if number_of_terms <= 0:
    print("Please enter a positive integer")
# if there is only one term, return n1
elif number_of_terms == 1:
    print("Fibonacci sequence upto",number_of_terms,":")
    print(n1)
# generate fibonacci sequence
else:
    print("Fibonacci sequence:")
    iterative_data.start_time = time.time()
    # Each pass slides the (n1, n2) window one step and tallies the
    # comparison, the addition and the three assignments it performs.
    while count < number_of_terms:
        iterative_data.increment_if_count()
        #print(n1)
        iterative_data.increment_add_count()
        nth = n1 + n2
        # update values
        iterative_data.increment_assignment_count()
        n1 = n2
        iterative_data.increment_assignment_count()
        n2 = nth
        iterative_data.increment_assignment_count()
        count += 1
    iterative_data.stop_time = time.time()
print('\n\nITERATIVE DATA')
iterative_data.print_function_data()
| 1,281 | -2 | 234 |
2bc0aff7c339f85f8d7761d9cc25bcfe9cb86616 | 23,532 | py | Python | commands/arch/network.py | naterh/openstack-guest-agents-unix | b6262b190d355f6469d95f462be0db53e3eb7ede | [
"Apache-2.0"
] | 15 | 2015-01-06T21:58:24.000Z | 2018-11-27T09:34:14.000Z | commands/arch/network.py | naterh/openstack-guest-agents-unix | b6262b190d355f6469d95f462be0db53e3eb7ede | [
"Apache-2.0"
] | 9 | 2015-03-06T02:11:29.000Z | 2021-03-13T07:13:45.000Z | commands/arch/network.py | naterh/openstack-guest-agents-unix | b6262b190d355f6469d95f462be0db53e3eb7ede | [
"Apache-2.0"
] | 18 | 2015-03-05T21:28:09.000Z | 2020-09-16T11:07:21.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
arch linux network helper module
"""
# Arch has two different kinds of network configuration. More recently,
# there's 'netcfg' and previously (for lack of a better term) 'legacy'.
#
# legacy uses:
# - 1 shell-script-style global configuration (/etc/rc.conf)
# - one IP per interface
# - routes are per interface
# - gateways are global
# - DNS is per interface
#
# netcfg uses:
# - multiple shell-script-style network configurations, 1 per interface
# - one IP per configuration
# - routes are per interface
# - gateways are per interface
# - DNS is global (/etc/resolv.conf)
#
# netcfg is designed for one IP per configuration, but it's not tolerant
# of the older style colon interfaces for IP aliasing. So we have to use
# a hack to get IP aliasing working:
# https://bbs.archlinux.org/viewtopic.php?pid=951573#p951573
#
# Arch is a rolling release, meaning new features and updated packages
# roll out on a unpredictable schedule. It also means there is no such
# thing as v1.0 or v2.0. We check if the netcfg package is installed to
# determine which format should be used.
import os
import re
import time
import subprocess
import logging
from cStringIO import StringIO
import commands.network
CONF_FILE = "/etc/rc.conf"
NETWORK_DIR = "/etc/network.d"
NETCTL_DIR = "/etc/netctl/"
def get_hostname():
    """
    Just required to check /etc/rc.conf for SysVInit based Archlinux images.
    All updated SystemD supporting images have it at default /etc/hostname
    Will fetch current hostname of VM if any and return.
    Looks at /etc/rc.conf config for Archlinux server using SysVInit.
    """
    try:
        with open(CONF_FILE) as hostname_fyl:
            for line in hostname_fyl.readlines():
                # Match a shell assignment of the form: HOSTNAME="name"
                hn = re.search('HOSTNAME="(.*)"', line)
                if hn:
                    # First HOSTNAME assignment wins.
                    return hn.group(1)
            return None
    # Python 2 except syntax. Best-effort lookup: any failure (e.g.
    # rc.conf absent on systemd-based images) is logged and yields None.
    except Exception, e:
        logging.info("Init support Arch hostname enquiry failed: %s." % str(e))
        return None
def get_hostname_file(infile, hostname):
    """
    Update hostname on system

    Returns the text of *infile* (an iterable of rc.conf lines) with the
    HOSTNAME variable rewritten to *hostname*.  If no HOSTNAME assignment
    is present, one is appended at the end.
    """
    outfile = StringIO()
    found = False
    for line in infile:
        line = line.strip()
        if '=' in line:
            k, v = line.split('=', 1)
            k = k.strip()
            if k == "HOSTNAME":
                # Replace the existing assignment with the new hostname.
                print >> outfile, 'HOSTNAME="%s"' % hostname
                found = True
            else:
                print >> outfile, line
        else:
            # Comments / blank lines pass through unchanged (stripped).
            print >> outfile, line
    if not found:
        # No HOSTNAME line was seen; append one.
        print >> outfile, 'HOSTNAME="%s"' % hostname
    outfile.seek(0)
    return outfile.read()
def _update_rc_conf_legacy(infile, interfaces):
    """
    Return data for (sub-)interfaces and routes

    *infile* is an iterable of existing rc.conf lines; *interfaces* maps
    interface name -> dict with 'ip4s', 'ip6s' and 'routes'.  Returns the
    rewritten rc.conf text as a string.
    """
    # Updating this file happens in two phases since it's non-trivial to
    # update. The INTERFACES and ROUTES variables the key lines, but they
    # will in turn reference other variables, which may be before or after.
    # As a result, we need to load the entire file, find the main variables
    # and then remove the reference variables. When that is done, we add
    # the lines for the new config.
    # First generate new config
    ifaces = []
    routes = []
    gateway4, gateway6 = commands.network.get_gateways(interfaces)
    ifnames = interfaces.keys()
    ifnames.sort()
    for ifname_prefix in ifnames:
        interface = interfaces[ifname_prefix]
        ip4s = interface['ip4s']
        ip6s = interface['ip6s']
        ifname_suffix_num = 0
        # Python 2 idiom: map(None, a, b) zips with None padding for the
        # shorter list (equivalent to itertools.izip_longest).
        for ip4, ip6 in map(None, ip4s, ip6s):
            # Second and later addresses become colon aliases (eth0:1, ...).
            if ifname_suffix_num:
                ifname = "%s:%d" % (ifname_prefix, ifname_suffix_num)
            else:
                ifname = ifname_prefix
            line = [ifname]
            if ip4:
                line.append('%(address)s netmask %(netmask)s' % ip4)
            if ip6:
                line.append('add %(address)s/%(prefixlen)s' % ip6)
            ifname_suffix_num += 1
            # Shell variable names cannot contain ':', so eth0:1 -> eth0_1.
            ifaces.append((ifname.replace(':', '_'), ' '.join(line)))
        for i, route in enumerate(interface['routes']):
            # Skip the default route; it is emitted as 'gateway' below.
            if route['network'] == '0.0.0.0' and \
                    route['netmask'] == '0.0.0.0' and \
                    route['gateway'] == gateway4:
                continue
            line = "-net %(network)s netmask %(netmask)s gw %(gateway)s" % \
                   route
            routes.append(('%s_route%d' % (ifname_prefix, i), line))
    if gateway4:
        routes.append(('gateway', 'default gw %s' % gateway4))
    if gateway6:
        routes.append(('gateway6', 'default gw %s' % gateway6))
    # Then load old file
    lines, variables = _parse_config(infile)
    # Update INTERFACES
    lineno = variables.get('INTERFACES')
    if lineno is not None:
        # Remove old lines
        for name in _parse_variable(lines[lineno], strip_bang=True):
            if name in variables:
                lines[variables[name]] = None
    else:
        lines.append('')
        lineno = len(lines) - 1
    config = []
    names = []
    for name, line in ifaces:
        config.append('%s="%s"' % (name, line))
        names.append(name)
    config.append('INTERFACES=(%s)' % ' '.join(names))
    lines[lineno] = '\n'.join(config)
    # Update ROUTES
    lineno = variables.get('ROUTES')
    if lineno is not None:
        # Remove old lines
        for name in _parse_variable(lines[lineno], strip_bang=True):
            if name in variables:
                lines[variables[name]] = None
    else:
        lines.append('')
        lineno = len(lines) - 1
    config = []
    names = []
    for name, line in routes:
        config.append('%s="%s"' % (name, line))
        names.append(name)
    config.append('ROUTES=(%s)' % ' '.join(names))
    lines[lineno] = '\n'.join(config)
    # (Possibly) comment out NETWORKS
    lineno = variables.get('NETWORKS')
    if lineno is not None:
        for name in _parse_variable(lines[lineno], strip_bang=True):
            nlineno = variables.get(name)
            if nlineno is not None:
                # NOTE(review): this writes '#' + lines[lineno] (the parent
                # NETWORKS line) over the referenced line; it looks like it
                # should be '#' + lines[nlineno] — verify intended.
                lines[nlineno] = '#' + lines[lineno]
        lines[lineno] = '#' + lines[lineno]
    # (Possibly) update DAEMONS
    lineno = variables.get('DAEMONS')
    if lineno is not None:
        daemons = _parse_variable(lines[lineno])
        try:
            # Re-enable the legacy 'network' daemon, drop net-profiles.
            network = daemons.index('!network')
            daemons[network] = 'network'
            if '@net-profiles' in daemons:
                daemons.remove('@net-profiles')
            lines[lineno] = 'DAEMONS=(%s)' % ' '.join(daemons)
        except ValueError:
            pass
    # Filter out any removed lines
    lines = filter(lambda l: l is not None, lines)
    # Serialize into new file
    outfile = StringIO()
    for line in lines:
        print >> outfile, line
    outfile.seek(0)
    return outfile.read()
def _get_file_data_netcfg(ifname, interface):
    """
    Return data for (sub-)interfaces

    Renders a netcfg profile for *ifname*: static IPv4/IPv6 (first address
    of each family), non-default routes, DNS, plus POST_UP/PRE_DOWN ip(8)
    commands for the remaining alias addresses.
    """
    # NOTE(review): 'ifaces' is never used in this function — dead code.
    ifaces = []
    label = interface['label']
    ip4s = interface['ip4s']
    ip6s = interface['ip6s']
    gateway4 = interface['gateway4']
    gateway6 = interface['gateway6']
    dns = interface['dns']
    outfile = StringIO()
    if label:
        print >>outfile, "# Label %s" % label
    print >>outfile, 'CONNECTION="ethernet"'
    print >>outfile, 'INTERFACE=%s' % ifname
    if ip4s:
        # pop() consumes the first address; leftovers become aliases below.
        ip4 = ip4s.pop(0)
        print >>outfile, 'IP="static"'
        print >>outfile, 'ADDR="%(address)s"' % ip4
        print >>outfile, 'NETMASK="%(netmask)s"' % ip4
        if gateway4:
            print >>outfile, 'GATEWAY="%s"' % gateway4
    if ip6s:
        ip6 = ip6s.pop(0)
        print >>outfile, 'IP6="static"'
        print >>outfile, 'ADDR6="%(address)s/%(prefixlen)s"' % ip6
        if gateway6:
            print >>outfile, 'GATEWAY6="%s"' % gateway6
    # NOTE(review): this filter drops a route if ANY of the three fields
    # matches (legacy variant requires all three to match) — verify.
    routes = ['"%(network)s/%(netmask)s via %(gateway)s"' % route
              for route in interface['routes'] if not
              route['network'] == '0.0.0.0' and not
              route['netmask'] == '0.0.0.0' and not
              route['gateway'] == gateway4]
    if routes:
        print >>outfile, 'ROUTES=(%s)' % ' '.join(routes)
    if dns:
        print >>outfile, 'DNS=(%s)' % ' '.join(dns)
    # Finally add remaining aliases. This is kind of hacky, see comment at
    # top for explanation
    aliases = ['%(address)s/%(netmask)s' % ip4 for ip4 in ip4s] + \
              ['%(address)s/%(prefixlen)s' % ip6 for ip6 in ip6s]
    if aliases:
        commands = '; '.join(['ip addr add %s dev %s' % (a, ifname)
                              for a in aliases])
        print >>outfile, 'POST_UP="%s"' % commands
        # Remove aliases in reverse order on the way down.
        aliases.reverse()
        commands = '; '.join(['ip addr del %s dev %s' % (a, ifname)
                              for a in aliases])
        print >>outfile, 'PRE_DOWN="%s"' % commands
    outfile.seek(0)
    return outfile.read()
def process_interface_files_legacy(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Rewrite the staged rc.conf contents (empty string when nothing has
    # been staged yet) with the legacy-style interface/route variables.
    previous = StringIO(update_files.get(CONF_FILE, ''))
    update_files[CONF_FILE] = _update_rc_conf_legacy(previous, interfaces)
def process_interface_files_netctl(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Every pre-existing profile in /etc/netctl/ is a removal candidate
    # until we learn below that it is still wanted.
    remove_files = set()
    for entry in os.listdir(NETCTL_DIR):
        path = os.path.join(NETCTL_DIR, entry)
        if not entry.endswith('~') and not os.path.isdir(path):
            remove_files.add(path)
    netnames = []
    for ifname, interface in interfaces.iteritems():
        path = os.path.join(NETCTL_DIR, ifname)
        update_files[path] = _get_file_data_netctl(ifname, interface)
        # Keep profiles we are about to (re)write.
        remove_files.discard(path)
        netnames.append(ifname)
    return remove_files, netnames
def process_interface_files_netcfg(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Every pre-existing file in /etc/network.d/ is a removal candidate
    # until we learn below that it is still wanted.
    remove_files = set()
    for entry in os.listdir(NETWORK_DIR):
        path = os.path.join(NETWORK_DIR, entry)
        if not entry.endswith('~') and not os.path.isdir(path):
            remove_files.add(path)
    netnames = []
    for ifname, interface in interfaces.iteritems():
        path = os.path.join(NETWORK_DIR, ifname)
        update_files[path] = _get_file_data_netcfg(ifname, interface)
        # Keep profiles we are about to (re)write.
        remove_files.discard(path)
        netnames.append(ifname)
    # Point rc.conf's NETWORKS variable at the new profile names.
    previous = StringIO(update_files.get(CONF_FILE, ''))
    update_files[CONF_FILE] = _update_rc_conf_netcfg(previous, netnames)
    return remove_files, netnames
| 31.8 | 91 | 0.582738 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
arch linux network helper module
"""
# Arch has two different kinds of network configuration. More recently,
# there's 'netcfg' and previously (for lack of a better term) 'legacy'.
#
# legacy uses:
# - 1 shell-script-style global configuration (/etc/rc.conf)
# - one IP per interface
# - routes are per interface
# - gateways are global
# - DNS is per interface
#
# netcfg uses:
# - multiple shell-script-style network configurations, 1 per interface
# - one IP per configuration
# - routes are per interface
# - gateways are per interface
# - DNS is global (/etc/resolv.conf)
#
# netcfg is designed for one IP per configuration, but it's not tolerant
# of the older style colon interfaces for IP aliasing. So we have to use
# a hack to get IP aliasing working:
# https://bbs.archlinux.org/viewtopic.php?pid=951573#p951573
#
# Arch is a rolling release, meaning new features and updated packages
# roll out on a unpredictable schedule. It also means there is no such
# thing as v1.0 or v2.0. We check if the netcfg package is installed to
# determine which format should be used.
import os
import re
import time
import subprocess
import logging
from cStringIO import StringIO
import commands.network
CONF_FILE = "/etc/rc.conf"
NETWORK_DIR = "/etc/network.d"
NETCTL_DIR = "/etc/netctl/"
def _execute(command):
logging.info('executing %s' % ' '.join(command))
pipe = subprocess.PIPE
p = subprocess.Popen(command, stdin=pipe, stdout=pipe, stderr=pipe, env={})
# Wait for process to finish and get output
stdout, stderr = p.communicate()
logging.debug('status = %d' % p.returncode)
if p.returncode:
logging.info('stdout = %r' % stdout)
logging.info('stderr = %r' % stderr)
return p.returncode
def configure_network(hostname, interfaces):
    """
    Reconfigure the guest's hostname and network interfaces.

    Detects whether the system uses systemd/netctl, netcfg, or legacy
    rc.conf networking, stages the new configuration files, cycles the
    network down/up, and returns a (status, message) tuple:
    (0, "") on success, (500, reason) on failure.
    """
    update_files = {}
    # NOTE(review): 'init' is assigned but never used.
    init=""
    # We need to figure out what style of network configuration is
    # currently being used by looking at /etc/rc.conf and then look
    # to see what style of network configuration we want to use by
    # looking to see if the netcfg package is installed
    if os.path.basename(os.path.realpath('/sbin/init')) == 'systemd':
        cur_netctl = True
        status = _execute(['/usr/bin/pacman', '-Q', 'netctl'])
        use_netctl = (status == 0)
        remove_files, netnames = process_interface_files_netctl(update_files, interfaces)
        get_hostname_file_systemd(hostname)
    else:
        cur_netctl = False
        use_netctl = False
        if os.path.exists(CONF_FILE):
            update_files[CONF_FILE] = open(CONF_FILE).read()
        infile = StringIO(update_files.get(CONF_FILE, ''))
        cur_netcfg = True   # Currently using netcfg
        lines, variables = _parse_config(infile)
        lineno = variables.get('DAEMONS')
        if lineno is not None:
            daemons = _parse_variable(lines[lineno])
            if 'network' in daemons:
                # Config uses legacy style networking
                cur_netcfg = False
        status = _execute(['/usr/bin/pacman', '-Q', 'netcfg'])
        use_netcfg = (status == 0)
        logging.info('using %s style configuration' %
                     (use_netcfg and 'netcfg' or 'legacy'))
        if use_netcfg:
            remove_files, netnames = process_interface_files_netcfg(
                update_files, interfaces)
        else:
            # NOTE(review): 'netnames' is not set on this path; the
            # 'elif cur_netcfg' loop below would raise NameError if
            # cur_netcfg is True while use_netcfg is False — verify.
            process_interface_files_legacy(update_files, interfaces)
            remove_files = set()
    # Generate new /etc/resolv.conf file
    filepath, data = commands.network.get_resolv_conf(interfaces)
    if data:
        update_files[filepath] = data
    # Update config file with new hostname
    infile = StringIO(update_files.get(CONF_FILE, ''))
    data = get_hostname_file(infile, hostname)
    update_files[CONF_FILE] = data
    # Generate new /etc/hosts file
    filepath, data = commands.network.get_etc_hosts(interfaces, hostname)
    update_files[filepath] = data
    # Set hostname
    try:
        commands.network.sethostname(hostname)
    except Exception, e:
        logging.error("Couldn't sethostname(): %s" % str(e))
        return (500, "Couldn't set hostname: %s" % str(e))
    # Stage files
    commands.network.stage_files(update_files)
    errors = set()
    # Down network
    logging.info('configuring interfaces down')
    if cur_netctl:
        for netname in netnames:
            if not interfaces[netname]['up']:
                # Don't try to down an interface that isn't already up
                logging.info('  %s, skipped (already down)' %
                             netname)
                continue
            status = _execute(['/usr/bin/netctl', 'stop', ''.join(['ethernet-', netname])])
            if status != 0:
                logging.info('  %s, failed (status %d)' % (netname, status))
                # Treat down failures as soft failures
            else:
                logging.info('  %s, success' % netname)
    elif cur_netcfg:
        for netname in netnames:
            if not interfaces[netname]['up']:
                # Don't try to down an interface that isn't already up
                logging.info('  %s, skipped (already down)' %
                             netname)
                continue
            status = _execute(['/usr/bin/netcfg', '-d', netname])
            if status != 0:
                logging.info('  %s, failed (status %d)' % (netname, status))
                # Treat down failures as soft failures
            else:
                logging.info('  %s, success' % netname)
    else:
        status = _execute(['/etc/rc.d/network', 'stop'])
        if status != 0:
            return (500, "Couldn't stop network: %d" % status)
    # Move files
    commands.network.move_files(update_files, remove_files)
    # Up network
    logging.info('configuring interfaces up')
    if use_netctl:
        for netname in netnames:
            status = _execute(['/usr/bin/netctl', 'restart', netname])
            status = _execute(['/usr/bin/netctl', 'reenable', netname])
            if status != 0:
                # Retry once before recording a hard failure.
                logging.info('  %s, failed (status %d), trying again' %
                             (netname, status))
                status = _execute(['/usr/bin/netctl', 'restart', netname])
                status = _execute(['/usr/bin/netctl', 'reenable', netname])
                if status != 0:
                    logging.info('  %s, failed (status %d)' %
                                 (netname, status))
                    errors.add(netname)
                else:
                    logging.info('  %s, success' % netname)
            else:
                logging.info('  %s, success' % netname)
    elif use_netcfg:
        for netname in netnames:
            status = _execute(['/usr/bin/netcfg', '-u', netname])
            if status != 0:
                logging.info('  %s, failed (status %d), trying again' %
                             (netname, status))
                # HACK: Migrating from legacy to netcfg configurations is
                # troublesome because of Arch bugs. Stopping the network
                # in legacy downs the interface, but doesn't remove the IP
                # addresses. This causes netcfg to complain and fail when
                # we go to configure the interface up. As a side-effect, it
                # will remove the offending IP. A second attempt to configure
                # the interface up succeeds. So we'll try a second time.
                status = _execute(['/usr/bin/netcfg', '-u', netname])
                if status != 0:
                    logging.info('  %s, failed (status %d)' %
                                 (netname, status))
                    errors.add(netname)
                else:
                    logging.info('  %s, success' % netname)
            else:
                logging.info('  %s, success' % netname)
    else:
        status = _execute(['/etc/rc.d/network', 'start'])
        if status != 0:
            return (500, "Couldn't start network: %d" % status)
    if errors:
        errors = list(errors)
        errors.sort()
        return (500, 'Failed to start ' + ', '.join(errors))
    return (0, "")
def get_hostname():
    """
    Just required to check /etc/rc.conf for SysVInit based Archlinux images.
    All updated SystemD supporting images have it at default /etc/hostname
    Will fetch current hostname of VM if any and return.
    Looks at /etc/rc.conf config for Archlinux server using SysVInit.
    """
    try:
        with open(CONF_FILE) as hostname_fyl:
            for line in hostname_fyl.readlines():
                # Match a shell assignment of the form: HOSTNAME="name"
                hn = re.search('HOSTNAME="(.*)"', line)
                if hn:
                    # First HOSTNAME assignment wins.
                    return hn.group(1)
            return None
    # Python 2 except syntax. Best-effort lookup: any failure (e.g.
    # rc.conf absent on systemd-based images) is logged and yields None.
    except Exception, e:
        logging.info("Init support Arch hostname enquiry failed: %s." % str(e))
        return None
def get_hostname_file_systemd(hostname):
    """Set the system hostname through systemd's hostnamectl utility."""
    command = ['/usr/bin/hostnamectl', 'set-hostname', hostname]
    _execute(command)
def get_hostname_file(infile, hostname):
    """
    Update hostname on system

    Returns the text of *infile* (an iterable of rc.conf lines) with the
    HOSTNAME variable rewritten to *hostname*.  If no HOSTNAME assignment
    is present, one is appended at the end.
    """
    outfile = StringIO()
    found = False
    for line in infile:
        line = line.strip()
        if '=' in line:
            k, v = line.split('=', 1)
            k = k.strip()
            if k == "HOSTNAME":
                # Replace the existing assignment with the new hostname.
                print >> outfile, 'HOSTNAME="%s"' % hostname
                found = True
            else:
                print >> outfile, line
        else:
            # Comments / blank lines pass through unchanged (stripped).
            print >> outfile, line
    if not found:
        # No HOSTNAME line was seen; append one.
        print >> outfile, 'HOSTNAME="%s"' % hostname
    outfile.seek(0)
    return outfile.read()
def _parse_variable(line, strip_bang=False):
k, v = line.split('=')
v = v.strip()
if v[0] == '(' and v[-1] == ')':
v = v[1:-1]
vars = re.split('\s+', v.strip())
if strip_bang:
vars = [v.lstrip('!') for v in vars]
return vars
def _parse_config(infile):
lines = []
variables = {}
for line in infile:
line = line.strip()
lines.append(line)
# FIXME: This doesn't correctly parse shell scripts perfectly. It
# assumes a fairly simple subset
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
variables[k] = len(lines) - 1
return lines, variables
def _update_rc_conf_legacy(infile, interfaces):
    """
    Return data for (sub-)interfaces and routes

    *infile* is an iterable of existing rc.conf lines; *interfaces* maps
    interface name -> dict with 'ip4s', 'ip6s' and 'routes'.  Returns the
    rewritten rc.conf text as a string.
    """
    # Updating this file happens in two phases since it's non-trivial to
    # update. The INTERFACES and ROUTES variables the key lines, but they
    # will in turn reference other variables, which may be before or after.
    # As a result, we need to load the entire file, find the main variables
    # and then remove the reference variables. When that is done, we add
    # the lines for the new config.
    # First generate new config
    ifaces = []
    routes = []
    gateway4, gateway6 = commands.network.get_gateways(interfaces)
    ifnames = interfaces.keys()
    ifnames.sort()
    for ifname_prefix in ifnames:
        interface = interfaces[ifname_prefix]
        ip4s = interface['ip4s']
        ip6s = interface['ip6s']
        ifname_suffix_num = 0
        # Python 2 idiom: map(None, a, b) zips with None padding for the
        # shorter list (equivalent to itertools.izip_longest).
        for ip4, ip6 in map(None, ip4s, ip6s):
            # Second and later addresses become colon aliases (eth0:1, ...).
            if ifname_suffix_num:
                ifname = "%s:%d" % (ifname_prefix, ifname_suffix_num)
            else:
                ifname = ifname_prefix
            line = [ifname]
            if ip4:
                line.append('%(address)s netmask %(netmask)s' % ip4)
            if ip6:
                line.append('add %(address)s/%(prefixlen)s' % ip6)
            ifname_suffix_num += 1
            # Shell variable names cannot contain ':', so eth0:1 -> eth0_1.
            ifaces.append((ifname.replace(':', '_'), ' '.join(line)))
        for i, route in enumerate(interface['routes']):
            # Skip the default route; it is emitted as 'gateway' below.
            if route['network'] == '0.0.0.0' and \
                    route['netmask'] == '0.0.0.0' and \
                    route['gateway'] == gateway4:
                continue
            line = "-net %(network)s netmask %(netmask)s gw %(gateway)s" % \
                   route
            routes.append(('%s_route%d' % (ifname_prefix, i), line))
    if gateway4:
        routes.append(('gateway', 'default gw %s' % gateway4))
    if gateway6:
        routes.append(('gateway6', 'default gw %s' % gateway6))
    # Then load old file
    lines, variables = _parse_config(infile)
    # Update INTERFACES
    lineno = variables.get('INTERFACES')
    if lineno is not None:
        # Remove old lines
        for name in _parse_variable(lines[lineno], strip_bang=True):
            if name in variables:
                lines[variables[name]] = None
    else:
        lines.append('')
        lineno = len(lines) - 1
    config = []
    names = []
    for name, line in ifaces:
        config.append('%s="%s"' % (name, line))
        names.append(name)
    config.append('INTERFACES=(%s)' % ' '.join(names))
    lines[lineno] = '\n'.join(config)
    # Update ROUTES
    lineno = variables.get('ROUTES')
    if lineno is not None:
        # Remove old lines
        for name in _parse_variable(lines[lineno], strip_bang=True):
            if name in variables:
                lines[variables[name]] = None
    else:
        lines.append('')
        lineno = len(lines) - 1
    config = []
    names = []
    for name, line in routes:
        config.append('%s="%s"' % (name, line))
        names.append(name)
    config.append('ROUTES=(%s)' % ' '.join(names))
    lines[lineno] = '\n'.join(config)
    # (Possibly) comment out NETWORKS
    lineno = variables.get('NETWORKS')
    if lineno is not None:
        for name in _parse_variable(lines[lineno], strip_bang=True):
            nlineno = variables.get(name)
            if nlineno is not None:
                # NOTE(review): this writes '#' + lines[lineno] (the parent
                # NETWORKS line) over the referenced line; it looks like it
                # should be '#' + lines[nlineno] — verify intended.
                lines[nlineno] = '#' + lines[lineno]
        lines[lineno] = '#' + lines[lineno]
    # (Possibly) update DAEMONS
    lineno = variables.get('DAEMONS')
    if lineno is not None:
        daemons = _parse_variable(lines[lineno])
        try:
            # Re-enable the legacy 'network' daemon, drop net-profiles.
            network = daemons.index('!network')
            daemons[network] = 'network'
            if '@net-profiles' in daemons:
                daemons.remove('@net-profiles')
            lines[lineno] = 'DAEMONS=(%s)' % ' '.join(daemons)
        except ValueError:
            pass
    # Filter out any removed lines
    lines = filter(lambda l: l is not None, lines)
    # Serialize into new file
    outfile = StringIO()
    for line in lines:
        print >> outfile, line
    outfile.seek(0)
    return outfile.read()
def _get_file_data_netctl(ifname, interface):
    """
    Return the netctl profile file contents for *ifname*.

    Emits a static IPv4/IPv6 configuration (first address of each family,
    consumed from the caller's lists via pop), any non-default routes, and
    DNS servers taken from the *interface* dict.
    """
    # Removed two dead 'ifaces = []' assignments that were never read.
    label = interface['label']
    ip4s = interface['ip4s']
    ip6s = interface['ip6s']
    gateway4 = interface['gateway4']
    gateway6 = interface['gateway6']
    dns = interface['dns']
    outfile = StringIO()
    if label:
        print >>outfile, "# Label %s" % label
    print >>outfile, 'Connection=ethernet'
    print >>outfile, 'Interface=%s' % ifname
    if ip4s:
        # pop() consumes the first address from the caller's list.
        ip4 = ip4s.pop(0)
        print >>outfile, 'IP=static'
        print >>outfile, 'Address=(\'%(address)s/%(netmask)s\')' % ip4
        if gateway4:
            print >>outfile, 'Gateway=%s' % gateway4
    if ip6s:
        ip6 = ip6s.pop(0)
        print >>outfile, 'IP6=static'
        print >>outfile, 'Address6=(\'%(address)s/%(prefixlen)s\')' % ip6
        if gateway6:
            print >>outfile, 'Gateway6=%s' % gateway6
    # Exclude the default route; netctl handles it via Gateway above.
    routes = ['%(network)s/%(netmask)s via %(gateway)s' % route
              for route in interface['routes'] if not
              route['network'] == '0.0.0.0' and not
              route['netmask'] == '0.0.0.0' and not
              route['gateway'] == gateway4]
    if routes:
        print >>outfile, 'Routes=(\'%s\')' % '\' \''.join(routes)
    if dns:
        print >>outfile, 'DNS=(\'%s\')' % '\' \''.join(dns)
    outfile.seek(0)
    return outfile.read()
def _get_file_data_netcfg(ifname, interface):
    """
    Return data for (sub-)interfaces

    Renders a netcfg profile for *ifname*: static IPv4/IPv6 (first address
    of each family), non-default routes, DNS, plus POST_UP/PRE_DOWN ip(8)
    commands for the remaining alias addresses.
    """
    # NOTE(review): 'ifaces' is never used in this function — dead code.
    ifaces = []
    label = interface['label']
    ip4s = interface['ip4s']
    ip6s = interface['ip6s']
    gateway4 = interface['gateway4']
    gateway6 = interface['gateway6']
    dns = interface['dns']
    outfile = StringIO()
    if label:
        print >>outfile, "# Label %s" % label
    print >>outfile, 'CONNECTION="ethernet"'
    print >>outfile, 'INTERFACE=%s' % ifname
    if ip4s:
        # pop() consumes the first address; leftovers become aliases below.
        ip4 = ip4s.pop(0)
        print >>outfile, 'IP="static"'
        print >>outfile, 'ADDR="%(address)s"' % ip4
        print >>outfile, 'NETMASK="%(netmask)s"' % ip4
        if gateway4:
            print >>outfile, 'GATEWAY="%s"' % gateway4
    if ip6s:
        ip6 = ip6s.pop(0)
        print >>outfile, 'IP6="static"'
        print >>outfile, 'ADDR6="%(address)s/%(prefixlen)s"' % ip6
        if gateway6:
            print >>outfile, 'GATEWAY6="%s"' % gateway6
    # NOTE(review): this filter drops a route if ANY of the three fields
    # matches (legacy variant requires all three to match) — verify.
    routes = ['"%(network)s/%(netmask)s via %(gateway)s"' % route
              for route in interface['routes'] if not
              route['network'] == '0.0.0.0' and not
              route['netmask'] == '0.0.0.0' and not
              route['gateway'] == gateway4]
    if routes:
        print >>outfile, 'ROUTES=(%s)' % ' '.join(routes)
    if dns:
        print >>outfile, 'DNS=(%s)' % ' '.join(dns)
    # Finally add remaining aliases. This is kind of hacky, see comment at
    # top for explanation
    aliases = ['%(address)s/%(netmask)s' % ip4 for ip4 in ip4s] + \
              ['%(address)s/%(prefixlen)s' % ip6 for ip6 in ip6s]
    if aliases:
        commands = '; '.join(['ip addr add %s dev %s' % (a, ifname)
                              for a in aliases])
        print >>outfile, 'POST_UP="%s"' % commands
        # Remove aliases in reverse order on the way down.
        aliases.reverse()
        commands = '; '.join(['ip addr del %s dev %s' % (a, ifname)
                              for a in aliases])
        print >>outfile, 'PRE_DOWN="%s"' % commands
    outfile.seek(0)
    return outfile.read()
def _update_rc_conf_netcfg(infile, netnames):
    """
    Rewrite rc.conf for netcfg-style networking: set NETWORKS to
    *netnames*, comment out the legacy INTERFACES/ROUTES variables, and
    switch the DAEMONS list from 'network' to '@net-profiles'.  Returns
    the rewritten file text.
    """
    # Load old file
    lines, variables = _parse_config(infile)
    # Update NETWORKS
    lineno = variables.get('NETWORKS')
    if lineno is None:
        # Add new line to contain it
        lines.append('')
        lineno = len(lines) - 1
    lines[lineno] = 'NETWORKS=(%s)' % ' '.join(netnames)
    # (Possibly) comment out INTERFACES
    lineno = variables.get('INTERFACES')
    if lineno is not None:
        for name in _parse_variable(lines[lineno], strip_bang=True):
            nlineno = variables.get(name)
            if nlineno is not None:
                # NOTE(review): writes '#' + lines[lineno] (the parent
                # line) over the referenced line; looks like it should be
                # '#' + lines[nlineno] — verify intended.
                lines[nlineno] = '#' + lines[lineno]
        lines[lineno] = '#' + lines[lineno]
    # (Possibly) comment out ROUTES
    lineno = variables.get('ROUTES')
    if lineno is not None:
        for name in _parse_variable(lines[lineno], strip_bang=True):
            nlineno = variables.get(name)
            if nlineno is not None:
                # NOTE(review): same lines[lineno]/lines[nlineno] concern
                # as above.
                lines[nlineno] = '#' + lines[lineno]
        lines[lineno] = '#' + lines[lineno]
    # (Possibly) update DAEMONS
    lineno = variables.get('DAEMONS')
    if lineno is not None:
        daemons = _parse_variable(lines[lineno])
        try:
            # Disable the legacy daemon and enable net-profiles after it.
            network = daemons.index('network')
            daemons[network] = '!network'
            if '@net-profiles' not in daemons:
                daemons.insert(network + 1, '@net-profiles')
            lines[lineno] = 'DAEMONS=(%s)' % ' '.join(daemons)
        except ValueError:
            pass
    # Serialize into new file
    outfile = StringIO()
    for line in lines:
        print >> outfile, line
    outfile.seek(0)
    return outfile.read()
def get_interface_files(infiles, interfaces, version):
    """
    Build the {path: contents} map of interface configuration files for
    the requested style: 'netctl', 'netcfg', or (anything else) legacy
    rc.conf.  *infiles* supplies the current rc.conf text where needed.
    """
    if version == 'netctl':
        update_files = {}
        netnames = []
        for ifname, interface in interfaces.iteritems():
            data = _get_file_data_netctl(ifname, interface)
            filepath = os.path.join(NETCTL_DIR, ifname)
            update_files[filepath] = data
            netnames.append(ifname)
            # Apply the new profile immediately and enable it at boot.
            status = _execute(['/usr/bin/netctl', 'restart', ifname])
            status = _execute(['/usr/bin/netctl', 'reenable', ifname])
        # BUG FIX: this branch previously fell through to the legacy
        # 'else' below, returning the legacy rc.conf changeset and
        # discarding the netctl files generated above.
        return update_files
    if version == 'netcfg':
        # (Removed redundant 'and version != "netctl"' — unreachable
        # after the return above.)
        update_files = {}
        netnames = []
        for ifname, interface in interfaces.iteritems():
            data = _get_file_data_netcfg(ifname, interface)
            filepath = os.path.join(NETWORK_DIR, ifname)
            update_files[filepath] = data
            netnames.append(ifname)
        infile = StringIO(infiles.get(CONF_FILE, ''))
        data = _update_rc_conf_netcfg(infile, netnames)
        update_files[CONF_FILE] = data
        return update_files
    else:
        infile = StringIO(infiles.get(CONF_FILE, ''))
        data = _update_rc_conf_legacy(infile, interfaces)
        return {CONF_FILE: data}
def process_interface_files_legacy(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Rewrite the staged rc.conf contents (empty string when nothing has
    # been staged yet) with the legacy-style interface/route variables.
    previous = StringIO(update_files.get(CONF_FILE, ''))
    update_files[CONF_FILE] = _update_rc_conf_legacy(previous, interfaces)
def process_interface_files_netctl(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Every pre-existing profile in /etc/netctl/ is a removal candidate
    # until we learn below that it is still wanted.
    remove_files = set()
    for entry in os.listdir(NETCTL_DIR):
        path = os.path.join(NETCTL_DIR, entry)
        if not entry.endswith('~') and not os.path.isdir(path):
            remove_files.add(path)
    netnames = []
    for ifname, interface in interfaces.iteritems():
        path = os.path.join(NETCTL_DIR, ifname)
        update_files[path] = _get_file_data_netctl(ifname, interface)
        # Keep profiles we are about to (re)write.
        remove_files.discard(path)
        netnames.append(ifname)
    return remove_files, netnames
def process_interface_files_netcfg(update_files, interfaces):
    """Generate changeset for interface configuration"""
    # Every pre-existing file in /etc/network.d/ is a removal candidate
    # until we learn below that it is still wanted.
    remove_files = set()
    for entry in os.listdir(NETWORK_DIR):
        path = os.path.join(NETWORK_DIR, entry)
        if not entry.endswith('~') and not os.path.isdir(path):
            remove_files.add(path)
    netnames = []
    for ifname, interface in interfaces.iteritems():
        path = os.path.join(NETWORK_DIR, ifname)
        update_files[path] = _get_file_data_netcfg(ifname, interface)
        # Keep profiles we are about to (re)write.
        remove_files.discard(path)
        netnames.append(ifname)
    # Point rc.conf's NETWORKS variable at the new profile names.
    previous = StringIO(update_files.get(CONF_FILE, ''))
    update_files[CONF_FILE] = _update_rc_conf_netcfg(previous, netnames)
    return remove_files, netnames
| 11,829 | 0 | 184 |
59a26a64c2087146b79e13ddeb5f087f09ca346a | 405 | py | Python | 2019/day8/day8p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | 2019/day8/day8p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | 2 | 2019-12-01T20:03:18.000Z | 2021-05-11T22:41:00.000Z | 2019/day8/day8p2.py | darkterbear/advent-of-code-2015 | 543d5a70c4b4c84081602cfa3d0ba05fe0693e54 | [
"MIT"
] | null | null | null | file = open('./input')
# Advent of Code 2019 day 8 part 2: decode the layered image.
w = 25
h = 6
ppl = 25 * 6  # pixels per layer
line = file.readline()
# Slice the digit stream into fixed-size layers of ints.
layers = [[int(digit) for digit in line[start:start + ppl]]
          for start in range(0, len(line), ppl)]
# For each pixel position, the first non-transparent (!= 2) layer wins.
img = []
for idx in range(ppl):
    for layer in layers:
        pixel = layer[idx]
        if pixel != 2:
            img.append(pixel)
            break
# Render the decoded image row by row.
for row in range(h):
    print(img[row * w:(row + 1) * w])
| 16.875 | 50 | 0.548148 | file = open('./input')
# Advent of Code 2019 day 8 part 2: decode the layered image.
w = 25
h = 6
ppl = 25 * 6  # pixels per layer
line = file.readline()
# Slice the digit stream into fixed-size layers of ints.
layers = [[int(digit) for digit in line[start:start + ppl]]
          for start in range(0, len(line), ppl)]
# For each pixel position, the first non-transparent (!= 2) layer wins.
img = []
for idx in range(ppl):
    for layer in layers:
        pixel = layer[idx]
        if pixel != 2:
            img.append(pixel)
            break
# Render the decoded image row by row.
for row in range(h):
    print(img[row * w:(row + 1) * w])
| 0 | 0 | 0 |
1341ac4d38b8fda3bf9982fdf4b559b6ffc792e4 | 7,383 | py | Python | sdk/python/pulumi_azure_native/media/v20200201preview/media_graph.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/media/v20200201preview/media_graph.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/media/v20200201preview/media_graph.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MediaGraph']
| 40.565934 | 372 | 0.63592 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MediaGraph']
class MediaGraph(pulumi.CustomResource):
    """The Media Graph resource (azure-native:media/v20200201preview:MediaGraph).

    NOTE: this file is produced by the Pulumi SDK Generator (see the file
    header) -- regenerate it rather than editing by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 media_graph_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sinks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MediaGraphAssetSinkArgs']]]]] = None,
                 sources: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MediaGraphRtspSourceArgs']]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        The Media Graph.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The Media Services account name.
        :param pulumi.Input[str] description: Media Graph description.
        :param pulumi.Input[str] media_graph_name: The Media Graph name.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the Azure subscription.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MediaGraphAssetSinkArgs']]]] sinks: Media Graph sinks.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MediaGraphRtspSourceArgs']]]] sources: Media Graph sources.
        """
        # Legacy keyword aliases kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is None => creating a new resource, so required inputs are
        # validated here; otherwise an existing resource is being looked up.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__['account_name'] = account_name
            __props__['description'] = description
            __props__['media_graph_name'] = media_graph_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if sinks is None and not opts.urn:
                raise TypeError("Missing required property 'sinks'")
            __props__['sinks'] = sinks
            if sources is None and not opts.urn:
                raise TypeError("Missing required property 'sources'")
            __props__['sources'] = sources
            # Output-only properties start as None and are filled in by the engine.
            __props__['created'] = None
            __props__['last_modified'] = None
            __props__['name'] = None
            __props__['state'] = None
            __props__['type'] = None
        # Register aliases so stacks created under older type tokens keep working.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:media/v20200201preview:MediaGraph"), pulumi.Alias(type_="azure-native:media:MediaGraph"), pulumi.Alias(type_="azure-nextgen:media:MediaGraph"), pulumi.Alias(type_="azure-native:media/v20190901preview:MediaGraph"), pulumi.Alias(type_="azure-nextgen:media/v20190901preview:MediaGraph")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(MediaGraph, __self__).__init__(
            'azure-native:media/v20200201preview:MediaGraph',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'MediaGraph':
        """
        Get an existing MediaGraph resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties are resolved from the provider; seed them as None.
        __props__ = dict()
        __props__["created"] = None
        __props__["description"] = None
        __props__["last_modified"] = None
        __props__["name"] = None
        __props__["sinks"] = None
        __props__["sources"] = None
        __props__["state"] = None
        __props__["type"] = None
        return MediaGraph(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def created(self) -> pulumi.Output[str]:
        """
        Date the Media Graph was created.
        """
        return pulumi.get(self, "created")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Media Graph description.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="lastModified")
    def last_modified(self) -> pulumi.Output[str]:
        """
        Date the Media Graph was last modified.
        """
        return pulumi.get(self, "last_modified")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def sinks(self) -> pulumi.Output[Sequence['outputs.MediaGraphAssetSinkResponse']]:
        """
        Media Graph sinks.
        """
        return pulumi.get(self, "sinks")
    @property
    @pulumi.getter
    def sources(self) -> pulumi.Output[Sequence['outputs.MediaGraphRtspSourceResponse']]:
        """
        Media Graph sources.
        """
        return pulumi.get(self, "sources")
    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        Media Graph state which indicates the resource allocation status for running the media graph pipeline.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
    # Map provider (camelCase) property names to Python (snake_case) ...
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    # ... and back, using the generated lookup tables.
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 175 | 6,774 | 23 |
b7f149b954644c463a51e9ebf379826302cf2926 | 3,210 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/word-ladder-ii.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/word-ladder-ii.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/word-ladder-ii.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(b^(d/2)), b is the branch factor of bfs, d is the result depth
# Space: O(w * l), w is the number of words, l is the max length of words
from collections import defaultdict
from string import ascii_lowercase
# Time: O(b^d), b is the branch factor of bfs, d is the result depth
# Space: O(w * l), w is the number of words, l is the max length of words
| 36.477273 | 153 | 0.523364 | # Time: O(b^(d/2)), b is the branch factor of bfs, d is the result depth
# Space: O(w * l), w is the number of words, l is the max length of words
from collections import defaultdict
from string import ascii_lowercase
class Solution(object):
    def findLadders(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: List[List[str]]

        Bidirectional BFS: expand the smaller frontier each round, recording
        predecessor edges in `tree`, then rebuild every shortest ladder by
        backtracking from endWord.  (Python 2 code: uses xrange.)
        """
        # Recursively expand the predecessor edges in `tree` into full
        # beginWord..word paths.
        def backtracking(tree, beginWord, word):
            return [[beginWord]] if word == beginWord else [path + [word] for new_word in tree[word] for path in backtracking(tree, beginWord, new_word)]
        words = set(wordList)
        if endWord not in words:
            return []
        tree = defaultdict(set)
        # `left`/`right` are the two BFS frontiers; `is_reversed` records
        # whether they have been swapped, so edges always point the same way.
        is_found, left, right, is_reversed = False, {beginWord}, {endWord}, False
        while left:
            words -= left  # never revisit words already reached
            new_left = set()
            for word in left:
                # Try every one-letter mutation of `word`.
                for new_word in (word[:i]+c+word[i+1:] for i in xrange(len(beginWord)) for c in ascii_lowercase):
                    if new_word not in words:
                        continue
                    if new_word in right:
                        is_found = True  # frontiers met: shortest length reached
                    else:
                        new_left.add(new_word)
                    tree[new_word].add(word) if not is_reversed else tree[word].add(new_word)
            if is_found:
                break
            left = new_left
            # Always expand the smaller frontier next (swap if needed).
            if len(left) > len(right):
                left, right, is_reversed = right, left, not is_reversed
        return backtracking(tree, beginWord, endWord)
# Time: O(b^d), b is the branch factor of bfs, d is the result depth
# Space: O(w * l), w is the number of words, l is the max length of words
class Solution2(object):
    def findLadders(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: List[List[str]]

        Single-direction BFS, level by level; `trace` maps each word to all
        predecessors that reach it on a shortest ladder, and the paths are
        rebuilt by backtracking once endWord is found.  (Python 2: xrange.)
        """
        dictionary = set(wordList)
        result, cur, visited, found, trace = [], [beginWord], set([beginWord]), False, defaultdict(list)
        while cur and not found:
            # Mark the whole current level as visited before expanding it so
            # that words on the same level can still record multiple parents.
            for word in cur:
                visited.add(word)
            next = set()
            for word in cur:
                # Try every one-letter mutation of `word`.
                for i in xrange(len(word)):
                    for c in ascii_lowercase:
                        candidate = word[:i] + c + word[i + 1:]
                        if candidate not in visited and candidate in dictionary:
                            if candidate == endWord:
                                found = True
                            next.add(candidate)
                            trace[candidate].append(word)
            cur = next
        if found:
            self.backtrack(result, trace, [], endWord)
        return result
    def backtrack(self, result, trace, path, word):
        # Walk predecessor links from `word` back to beginWord (which has no
        # trace entry); each completed path is appended to result reversed.
        if not trace[word]:
            path.append(word)
            result.append(path[::-1])
            path.pop()
        else:
            for prev in trace[word]:
                path.append(word)
                self.backtrack(result, trace, path, prev)
                path.pop()
| 489 | 2,309 | 45 |
8c896bbcec49b232e565de5d2c577d39e4071374 | 347 | py | Python | video/write.py | TakeshiKishita/python_util | a05c4922699b4fd4545f5792d280bdbaec5a2dce | [
"Apache-2.0"
] | null | null | null | video/write.py | TakeshiKishita/python_util | a05c4922699b4fd4545f5792d280bdbaec5a2dce | [
"Apache-2.0"
] | null | null | null | video/write.py | TakeshiKishita/python_util | a05c4922699b4fd4545f5792d280bdbaec5a2dce | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta
class Writer(metaclass=ABCMeta):
    """Abstract base class for video writing.

    Subclasses implement the open / write / close life cycle; the base
    implementations are no-ops.
    """

    def open(self, **kwargs):
        """Open the writing facility."""
        return None

    def write(self, **kwargs):
        """Emit output."""
        return None

    def close(self, **kwargs):
        """Finish processing."""
        return None
| 13.88 | 32 | 0.420749 | from abc import ABCMeta
class Writer(metaclass=ABCMeta):
    """
    Abstract base class for video writing.
    """
    def open(self, **kwargs):
        """
        Open the writing facility.
        """
        pass
    def write(self, **kwargs):
        """
        Emit output.
        """
        pass
    def close(self, **kwargs):
        """
        Finish processing.
        """
        pass
| 0 | 0 | 0 |
7ff8c4529b858969915797f2aee76fbced4479f9 | 1,064 | py | Python | npipes/utils/autodeleter.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | npipes/utils/autodeleter.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | npipes/utils/autodeleter.py | praxik/nPipes | 4edf8fa0d0467e3455941c46e960fdf3f43e2d31 | [
"Apache-2.0"
] | null | null | null | from contextlib import contextmanager, ExitStack
from pathlib import Path
from typing import Iterator
from npipes.utils.typeshed import pathlike
@contextmanager
def autoDeleteFile(path:pathlike) -> Iterator[pathlike]:
    """Yield *path*, removing it when the context exits (if it is still a file).
    """
    try:
        yield path
    finally:
        target = Path(path)
        if target.is_file():
            target.unlink()
class AutoDeleter(ExitStack):
    """An ExitStack that deletes every file registered with :meth:`add`.

    Handy for temporary on-disk files that must disappear when a
    computation finishes.
    Ex:
        with AutoDeleter() as deleter:
            deleter.add(file_1)
            # ...
            deleter.add(file_2)
            # ...
            file_3 = deleter.add("some_file.txt")
        # file_1, file_2, and file_3 are deleted here automatically
    """
    def add(self, path:pathlike) -> pathlike:
        """Register *path* for deletion when the stack closes, then return it.
        """
        manager = autoDeleteFile(path)
        return self.enter_context(manager)
| 28 | 85 | 0.669173 | from contextlib import contextmanager, ExitStack
from pathlib import Path
from typing import Iterator
from npipes.utils.typeshed import pathlike
@contextmanager
def autoDeleteFile(path:pathlike) -> Iterator[pathlike]:
    """Context manager that deletes a single file when the context ends
    """
    try:
        yield path
    finally:
        # Best-effort cleanup: only unlink if the path still exists as a
        # regular file when the context exits.
        if Path(path).is_file():
            Path(path).unlink()
class AutoDeleter(ExitStack):
    """Stack manager for auto-deleting files; allows files to be added incrementally.
    Useful for working with temporary files on disk that should be
    removed at the end of a computation.
    Ex:
        with AutoDeleter() as deleter:
            deleter.add(file_1)
            # ...
            deleter.add(file_2)
            # ...
            file_3 = deleter.add("some_file.txt")
        # file_1, file_2, and file_3 are deleted here automatically
    """
    def add(self, path:pathlike) -> pathlike:
        """Returns path after adding it to the auto-deletion context.
        """
        # enter_context registers autoDeleteFile's cleanup (the unlink) to
        # run when this ExitStack closes.
        return self.enter_context(autoDeleteFile(path))
| 0 | 0 | 0 |
52fc3fbf8e15f41bb79aedf817ef0eba6c4362d9 | 551 | py | Python | app.py | patbahls/simple-slash-commands | feba84924db94d478c104108a245a4e120009a6a | [
"Apache-2.0"
] | null | null | null | app.py | patbahls/simple-slash-commands | feba84924db94d478c104108a245a4e120009a6a | [
"Apache-2.0"
] | null | null | null | app.py | patbahls/simple-slash-commands | feba84924db94d478c104108a245a4e120009a6a | [
"Apache-2.0"
] | null | null | null | from bottle import run,post,request,response,route
import os
import urllib
@post('/test')
@route('/path',method="post")
if __name__ == '__main__':
port_config = int(os.getenv('PORT', 5000))
run(host='0.0.0.0', port=port_config)
| 26.238095 | 79 | 0.678766 | from bottle import run,post,request,response,route
import os
import urllib
@post('/test')
def simple_test():
    """Health-check endpoint: POST /test returns a plain greeting."""
    return "Hello World!"
@route('/path',method="post")
def gen_path_3():
    """Slash-command handler: POST /path.

    Reads the 'text' form field, URL-quotes it onto the 'sndwserv:/' prefix
    and returns a Slack-style 'in_channel' JSON payload.
    NOTE(review): Python 2 code -- urllib.quote moved to urllib.parse in
    Python 3.
    """
    postdata = request.forms.get("text")
    output_path = str("sndwserv:/" + urllib.quote(postdata))
    package = {"response_type": "in_channel", "text": "{}".format(output_path)}
    # bottle JSON-serializes returned dicts; set the content type explicitly.
    response.content_type = 'application/json'
    return package
if __name__ == '__main__':
    # Port comes from the environment (e.g. a PaaS-supplied PORT), default 5000.
    port_config = int(os.getenv('PORT', 5000))
    run(host='0.0.0.0', port=port_config)
| 267 | 0 | 44 |
99037d2d3a9ce73ad64ac10eb1f078ef701d0abc | 921 | py | Python | main.py | Fabricio872/popcorn-detector | 7ebf92a05d9761632ef63db5ebfbe61791b25e1f | [
"MIT"
] | null | null | null | main.py | Fabricio872/popcorn-detector | 7ebf92a05d9761632ef63db5ebfbe61791b25e1f | [
"MIT"
] | null | null | null | main.py | Fabricio872/popcorn-detector | 7ebf92a05d9761632ef63db5ebfbe61791b25e1f | [
"MIT"
] | null | null | null | import time as time_lib
import numpy as np
import sounddevice as sd
duration = 50 # in seconds
warmup_time = 2 # in seconds
max_pop_time = 3 # in seconds time
pop_threshold = 15 # in volume units
min_pop_time = 512 # in milliseconds
pop_times = []
if __name__ == '__main__':
main()
| 24.236842 | 88 | 0.648208 | import time as time_lib
import numpy as np
import sounddevice as sd
duration = 50 # in seconds
warmup_time = 2 # in seconds
max_pop_time = 3 # in seconds time
pop_threshold = 15 # in volume units
min_pop_time = 512 # in milliseconds
pop_times = []
def pop_time():
    """Seconds elapsed since the most recently recorded pop.

    NOTE(review): raises IndexError if called before any pop is recorded;
    the only (commented-out) caller guards with `if (pop_times)`.
    """
    return time_lib.time() - pop_times[-1]
def audio_callback(indata, frames, time, status):
    """sounddevice InputStream callback: detect and count loud events ('pops').

    A pop is registered when the block's amplitude estimate exceeds
    pop_threshold; sleeping for min_pop_time milliseconds afterwards
    debounces so one pop is not counted twice.
    """
    # Rough loudness estimate for this audio block.
    volume_norm = np.linalg.norm(indata) * 10
    if (int(volume_norm) > pop_threshold):
        if (pop_times):
            # print("%f pops/second" % (round(1 / pop_time(), 2)), end='\r', flush=True)
            print(len(pop_times), end='\r', flush=True)
        pop_times.append(time_lib.time())
        # Debounce window.  NOTE(review): sleeping inside the audio callback
        # blocks the stream for that duration -- confirm this is intended.
        time_lib.sleep(min_pop_time / 1000)
def main():
    """Listen on the default input device for `duration` seconds and print
    the total number of pops detected."""
    stream = sd.InputStream(callback=audio_callback)
    with stream:
        sd.sleep(duration * 1000)  # sd.sleep takes milliseconds
    print('\n', len(pop_times), 'total popcorns')
if __name__ == '__main__':
    main()
| 554 | 0 | 69 |
13b4db6faf2877c8928a23eab3d719532df2d032 | 2,076 | py | Python | RBM/Function.py | Shoeboxam/Neural_Network | 61da4c2e4f6603a08042612d5ff2fe334ee7b20f | [
"MIT"
] | 3 | 2017-03-11T07:21:46.000Z | 2017-09-01T20:12:06.000Z | RBM/Function.py | Shoeboxam/Neural_Network | 61da4c2e4f6603a08042612d5ff2fe334ee7b20f | [
"MIT"
] | null | null | null | RBM/Function.py | Shoeboxam/Neural_Network | 61da4c2e4f6603a08042612d5ff2fe334ee7b20f | [
"MIT"
] | null | null | null | # Functions specific to restricted boltzmann machines
# Adapted from MFP/Functions.py
import numpy as np
# BASIS FUNCTIONS: Regression
# Diagonalize first dimension of an n-dimensional array
tau = 1 # Sigmoid threshold unit
basis_logistic = Function('basis', 'logistic', # Commonly known as 'Sigmoid'
[lambda x: tau * (1 + np.exp(-x/tau))**-1, # S
lambda x: np.diag(np.exp(x / tau) / (np.exp(x / tau) + 1) ** 2)]) # S * (1 - S)
# BASIS FUNCTIONS: Classification
basis_softmax = Function('basis', 'SMax',
[softmax,
lambda x: diag(softmax(x)) - softmax(x) @ softmax(x).T])
# ANNEALING FUNCTIONS (learning rate)
anneal_fixed = Function('learn', 'fixed',
[lambda t, d, lim: 1])
anneal_linear = Function('learn', 'linear',
[lambda t, d, lim: 1 - t/lim])
anneal_inverse = Function('learn', 'inverse',
[lambda t, d, lim: 1 / (d * t)])
anneal_power = Function('learn', 'power',
[lambda t, d, lim: d**t])
anneal_exp = Function('learn', 'exp',
[lambda t, d, lim: np.exp(-t / l)])
# DISTRIBUTION FUNCTIONS
dist_uniform = Function('dist', 'uniform',
[lambda *args: np.random.uniform(low=-1, high=1, size=[*args])])
dist_normal = Function('dist', 'normal',
[lambda *args: np.random.normal(loc=0, scale=1, size=[*args])])
| 31.454545 | 107 | 0.545279 | # Functions specific to restricted boltzmann machines
# Adapted from MFP/Functions.py
import numpy as np
class Function(object):
    """A named, role-tagged family of evaluators.

    ``evaluators[d]`` is the d-th derivative of the function; index 0 is the
    function itself.  ``usage`` tags what the function is for (e.g. 'basis',
    'learn', 'dist') and ``name`` is a human-readable label.
    """

    def __init__(self, usage, name, evaluators):
        self.usage = usage
        self.name = name
        self._evaluator = evaluators

    def __call__(self, *args, d=0):
        # `d` selects the derivative order to evaluate (0 = the function).
        evaluator = self._evaluator[d]
        return evaluator(*args)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<{} {}>'.format(self.usage, self.name)
# BASIS FUNCTIONS: Regression
# Diagonalize first dimension of an n-dimensional array
tau = 1 # Sigmoid threshold unit
# Evaluator list: index 0 is the sigmoid itself, index 1 its derivative
# (returned as a diagonal Jacobian via np.diag).
basis_logistic = Function('basis', 'logistic', # Commonly known as 'Sigmoid'
                          [lambda x: tau * (1 + np.exp(-x/tau))**-1, # S
                           lambda x: np.diag(np.exp(x / tau) / (np.exp(x / tau) + 1) ** 2)]) # S * (1 - S)
# BASIS FUNCTIONS: Classification
def softmax(x):
    """Numerically stable softmax: exp(x - max(x)) normalised to sum to 1."""
    exps = np.exp(x - x.max())
    return exps / exps.sum()
basis_softmax = Function('basis', 'SMax',
                         [softmax,
                          # Jacobian of softmax: diag(s) - s s^T.
                          # FIX: 'diag' was unqualified (NameError when the
                          # derivative is evaluated); numpy is imported as np.
                          lambda x: np.diag(softmax(x)) - softmax(x) @ softmax(x).T])
# ANNEALING FUNCTIONS (learning rate), called as f(t, d, lim):
# t = current step, d = decay constant, lim = step limit.
anneal_fixed = Function('learn', 'fixed',
                        [lambda t, d, lim: 1])
anneal_linear = Function('learn', 'linear',
                         [lambda t, d, lim: 1 - t/lim])
anneal_inverse = Function('learn', 'inverse',
                          [lambda t, d, lim: 1 / (d * t)])
anneal_power = Function('learn', 'power',
                        [lambda t, d, lim: d**t])
anneal_exp = Function('learn', 'exp',
                      # FIX: the lambda referenced an undefined name 'l';
                      # the step-limit parameter is 'lim'.
                      [lambda t, d, lim: np.exp(-t / lim)])
# DISTRIBUTION FUNCTIONS: weight initializers; *args gives the tensor shape.
dist_uniform = Function('dist', 'uniform',
                        [lambda *args: np.random.uniform(low=-1, high=1, size=[*args])])
dist_normal = Function('dist', 'normal',
                       [lambda *args: np.random.normal(loc=0, scale=1, size=[*args])])
| 374 | 2 | 152 |
5d50bf14a02af4c7549f9d42e27dbbd297324aa3 | 367 | py | Python | manet/data/__init__.py | jonasteuwen/manet-old | fb20c98f7e5c89a5ffe89d851ee84e7b65c5e229 | [
"BSD-2-Clause"
] | 1 | 2021-02-23T04:51:19.000Z | 2021-02-23T04:51:19.000Z | manet/data/__init__.py | jonasteuwen/manet-old | fb20c98f7e5c89a5ffe89d851ee84e7b65c5e229 | [
"BSD-2-Clause"
] | null | null | null | manet/data/__init__.py | jonasteuwen/manet-old | fb20c98f7e5c89a5ffe89d851ee84e7b65c5e229 | [
"BSD-2-Clause"
] | 1 | 2021-02-23T04:51:20.000Z | 2021-02-23T04:51:20.000Z | # encoding: utf-8
from manet.utils import read_image
import os
| 22.9375 | 77 | 0.683924 | # encoding: utf-8
from manet.utils import read_image
import os
def curr_path(fn):
    """Return the absolute path of *fn* relative to this module's directory."""
    base = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(base, fn)
def prob_map(name='one'):
    """Load the 'prediction_<name>.nrrd' probability map shipped next to
    this module and return (first channel, metadata with 0.2mm spacing)."""
    filename = 'prediction_{}.nrrd'.format(name)
    prob, metadata = read_image(curr_path(filename))
    metadata['spacing'] = (0.2, 0.2)
    return prob[0], metadata
| 256 | 0 | 46 |
b495ccf1b957e21addfca760421b5119c0becc52 | 2,094 | py | Python | app/user/utils.py | vanwt/cmdb | c1539140ab0a20d8e2be98e5d878b46848122316 | [
"MIT"
] | 1 | 2019-12-15T05:20:42.000Z | 2019-12-15T05:20:42.000Z | app/user/utils.py | vanwt/cmdb | c1539140ab0a20d8e2be98e5d878b46848122316 | [
"MIT"
] | 12 | 2020-02-12T03:10:46.000Z | 2022-02-26T21:21:46.000Z | app/user/utils.py | vanwt/cmdb | c1539140ab0a20d8e2be98e5d878b46848122316 | [
"MIT"
] | null | null | null | from .models import Menu
| 31.727273 | 70 | 0.363419 | from .models import Menu
def jwt_response(token, user=None, request=None):
    """Build the JWT login response payload.

    Returns a dict carrying the token, the user's display info and the menu
    tree the user may see: every menu for superusers, otherwise only the
    menus attached to the user's roles (children are filtered to allowed
    menus as well).
    """
    if user.is_superuser:
        # Superusers see the full menu tree.
        menus = Menu.objects.all()
        user_menu = []
        for menu in menus:
            if menu.is_parent:
                c = {
                    "name": menu.name,
                    "icon": menu.icon,
                    "label": menu.title,
                    "url": menu.url,
                    "path": menu.path,
                }
                # parent component: attach all of its children
                for m in menu.childrens.all():
                    c.setdefault("children", []).append({
                        "name": m.name,
                        "icon": m.icon,
                        "label": m.title,
                        "url": m.url,
                        "path": m.path,
                    })
                user_menu.append(c)
    else:
        # Union of the menus granted through each of the user's roles.
        menus = Menu.objects.none()
        for role in user.roles.all():
            menus |= role.menu.all()
        user_menu = []
        for menu in menus:
            if menu.is_parent:
                c = {
                    "name": menu.name,
                    "icon": menu.icon,
                    "label": menu.title,
                    "url": menu.url,
                    "path": menu.path,
                }
                # parent component: attach only children the user may see
                for m in menu.childrens.all():
                    if c.get("children", None) is None:
                        c["children"] = []
                    if m in menus:
                        c["children"].append({
                            "name": m.name,
                            "icon": m.icon,
                            "label": m.title,
                            "url": m.url,
                            "path": m.path,
                        })
                user_menu.append(c)
    content = {
        "code": 0,
        "msg": "Success !",
        "token": token,
        "username": user.username,
        "realname": user.realname if user.realname else user.username,
        "lastlogin": user.last_login,
        "menu": user_menu
    }
    return content
| 2,057 | 0 | 23 |
01695a3f6dfb24dd21ee39aa1da81801ab132d5e | 621 | py | Python | src/cogs/ide/ide.py | Kraots/Jarvide | 75d8405dc836b1acf9c4b2abaf85fd769e6e424a | [
"MIT"
] | null | null | null | src/cogs/ide/ide.py | Kraots/Jarvide | 75d8405dc836b1acf9c4b2abaf85fd769e6e424a | [
"MIT"
] | null | null | null | src/cogs/ide/ide.py | Kraots/Jarvide | 75d8405dc836b1acf9c4b2abaf85fd769e6e424a | [
"MIT"
] | null | null | null | from .dialogs import OpenView
from src.utils import EmbedFactory
from disnake.ext import commands
class Ide(commands.Cog):
"""Ide cog"""
@commands.command()
@commands.max_concurrency(1, commands.BucketType.channel)
def setup(bot: commands.Bot) -> None:
"""Setup Ide cog"""
bot.add_cog(Ide(bot))
| 24.84 | 80 | 0.671498 | from .dialogs import OpenView
from src.utils import EmbedFactory
from disnake.ext import commands
class Ide(commands.Cog):
    """Ide cog"""
    def __init__(self, bot: commands.Bot):
        self.bot = bot
    @commands.command()
    @commands.max_concurrency(1, commands.BucketType.channel)
    async def ide(self, ctx: commands.Context) -> None:
        """Open the IDE: send an embed (no file open yet) with the OpenView
        controls.  max_concurrency limits it to one active IDE per channel."""
        embed = EmbedFactory.ide_embed(ctx, "File open: No file currently open")
        view = OpenView(ctx)
        # Store the sent message on the view (presumably so the view can
        # edit it later -- confirm against OpenView).
        view.bot_message = await ctx.send(embed=embed, view=view)
def setup(bot: commands.Bot) -> None:
    """Setup Ide cog (extension entry point: registers the cog on the bot)."""
    bot.add_cog(Ide(bot))
| 247 | 0 | 53 |
7ed307be5ec1a721f724f1bc0d44636969fa0963 | 957 | py | Python | deps/cmark/tools/make_entities_inc.py | cboettig/hash-archive | 2f50fdc2929f60447b00561901d59c4ce83651c3 | [
"MIT"
] | 1,212 | 2015-03-26T19:08:16.000Z | 2022-01-10T08:32:45.000Z | SymbolExtractorAndRenamer/cmark/tools/make_entities_inc.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 114 | 2015-03-26T18:30:53.000Z | 2017-07-21T16:25:36.000Z | SymbolExtractorAndRenamer/cmark/tools/make_entities_inc.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 76 | 2015-07-29T14:39:04.000Z | 2021-04-12T07:31:47.000Z | # Creates C data structures for binary lookup table of entities,
# using python's html5 entity data.
# Usage: python3 tools/make_entities_inc.py > src/entities.inc
import html
entities5 = html.entities.html5
# remove keys without semicolons. For some reason the list
# has duplicates of a few things, like auml, one with and one
# without a semicolon.
# Each entry is (name-without-semicolon, utf-8 bytes); the list is sorted
# so the generated C table can be binary-searched.
entities = sorted([(k[:-1], entities5[k].encode('utf-8')) for k in entities5.keys() if k[-1] == ';'])
# Print out the header:
print("""/* Autogenerated by tools/make_headers_inc.py */
struct cmark_entity_node {
unsigned char *entity;
unsigned char bytes[8];
};
#define CMARK_ENTITY_MIN_LENGTH 2
#define CMARK_ENTITY_MAX_LENGTH 31""")
print("#define CMARK_NUM_ENTITIES " + str(len(entities)));
# Emit one initializer per entity: the bytes array is zero-terminated.
print("\nstatic const struct cmark_entity_node cmark_entities[] = {");
for (ent, bs) in entities:
    print('{(unsigned char*)"' + ent + '", {' + ', '.join(map(str, bs)) + ', 0}},')
print("};")
| 29 | 101 | 0.69697 | # Creates C data structures for binary lookup table of entities,
# using python's html5 entity data.
# Usage: python3 tools/make_entities_inc.py > src/entities.inc
import html

entities5 = html.entities.html5
# Keep only names ending in ';' (the table also carries a few semicolon-less
# duplicates such as 'auml').  Each entry is (name-without-semicolon,
# utf-8 bytes), sorted so the generated C table can be binary-searched.
entities = sorted(
    (name[:-1], entities5[name].encode('utf-8'))
    for name in entities5
    if name.endswith(';')
)
# Emit the C header.
print("""/* Autogenerated by tools/make_headers_inc.py */
struct cmark_entity_node {
unsigned char *entity;
unsigned char bytes[8];
};
#define CMARK_ENTITY_MIN_LENGTH 2
#define CMARK_ENTITY_MAX_LENGTH 31""")
print("#define CMARK_NUM_ENTITIES " + str(len(entities)))
print("\nstatic const struct cmark_entity_node cmark_entities[] = {")
# One initializer per entity; the byte array is zero-terminated.
for name, encoded in entities:
    print('{(unsigned char*)"' + name + '", {' + ', '.join(map(str, encoded)) + ', 0}},')
print("};")
| 0 | 0 | 0 |
3b0460cc9b63041c3382b63ad2dae215e4a417a0 | 1,128 | py | Python | Python/LearnPythonTheHardWay/ex31.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | Python/LearnPythonTheHardWay/ex31.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | Python/LearnPythonTheHardWay/ex31.py | bryarcole/The-Portfolio | 62c2573ce4f007dccf5be1d67daf97286d6b4a5e | [
"MIT"
] | null | null | null | print "You enter a dark room with two doors. Do you go thorugh door #1 or door # 2"
door = raw_input(">" )
if door == "1":
print "Theres a giant bear here earting a cheescake. What do you do?"
print "Option '1'. Take the cake"
print "Option '2'. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bears eats your face off. Loser face! "
elif bear == "2":
print "The bear eats your legs off. Good job Legless face! "
else: #haha error in the indentiuon in the book.
print "Well, doing $s is pribably better. Bear runs way " % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina. "
print "1. Blueberries."
print "2. Yellow Hacket clothespins."
print "3. Understanding revolvers yelling melodies. "
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of hjello. Greatness!"
else:
print "The insanity rots your eyes int a pool of muck. great!"
else:
print "You stumble around and fall on a knife and die. You suck!"
| 34.181818 | 83 | 0.636525 | print "You enter a dark room with two doors. Do you go thorugh door #1 or door # 2"
door = raw_input(">" )
if door == "1":
print "Theres a giant bear here earting a cheescake. What do you do?"
print "Option '1'. Take the cake"
print "Option '2'. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bears eats your face off. Loser face! "
elif bear == "2":
print "The bear eats your legs off. Good job Legless face! "
else: #haha error in the indentiuon in the book.
print "Well, doing $s is pribably better. Bear runs way " % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina. "
print "1. Blueberries."
print "2. Yellow Hacket clothespins."
print "3. Understanding revolvers yelling melodies. "
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of hjello. Greatness!"
else:
print "The insanity rots your eyes int a pool of muck. great!"
else:
print "You stumble around and fall on a knife and die. You suck!"
| 0 | 0 | 0 |
216c7186aa0df89e03e4f63334c276700d96765c | 2,881 | py | Python | settings/Julich_chopper_modes_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | settings/Julich_chopper_modes_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | settings/Julich_chopper_modes_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | line0.timing_system.channels.hsc.delay = 4.97e-06
line0.Phase [s] = 5.4527e-06
line0.ChopX = 36.78
line0.ChopY = 30.11
line0.description = 'S-1t'
line0.updated = '2019-05-30 14:18:48'
line1.timing_system.channels.hsc.delay = 0.0
line1.ChopX = 36.78
line1.ChopY = 31.136
line1.description = 'S-1'
line1.updated = '2019-05-30 14:25:48'
line2.timing_system.channels.hsc.delay = 8.232e-09
line2.ChopX = 36.78
line2.ChopY = 31.0579
line2.description = 'S-3'
line2.updated = '2019-05-30 14:28:12'
line3.timing_system.channels.hsc.delay = 1.372e-08
line3.ChopX = 36.78
line3.ChopY = 30.982499999999998
line3.description = 'S-5'
line3.updated = '2019-05-30 14:28:12'
line4.timing_system.channels.hsc.delay = 3.0184e-08
line4.ChopX = 36.78
line4.ChopY = 30.7563
line4.description = 'S-11'
line4.updated = '2019-05-30 14:28:12'
line5.timing_system.channels.hsc.delay = 6.86e-08
line5.ChopX = 36.78
line5.ChopY = 30.2285
line5.description = 'S-25'
line5.updated = '2019-05-30 14:28:12'
line6.timing_system.channels.hsc.delay = 0.0
line6.ChopX = 36.78
line6.ChopY = 30.555
line6.description = 'H-1'
line6.updated = '2019-05-30 14:19:34'
line7.timing_system.channels.hsc.delay = 0.0
line7.ChopX = 36.78
line7.ChopY = 30.555
line7.description = 'H-56'
line7.updated = '2019-05-30 14:17:51'
line8.timing_system.channels.hsc.delay = 0.0
line8.ChopX = 27.67
line8.ChopY = 30.925
line8.description = 'Bypass'
line8.updated = '2019-05-30 14:17:51'
motor_names = ['ChopX', 'ChopY', 'timing_system.channels.hsc.delay', 'timing_system.p0_shift']
motor_labels = ['X', 'Y', 'Phase', 'P0 Shift']
nrows = 12
formats = ['%+6.4f', '%+6.4f', 'time', 'time']
title = 'High-Speed Julich Chopper Modes'
line9.description = 'S-15'
line9.updated = '2019-05-30 14:28:12'
line9.ChopX = 36.78
line9.ChopY = 30.6055
line9.timing_system.channels.hsc.delay = 4.116e-08
line10.description = 'S-19'
line10.updated = '2019-05-30 14:28:12'
line10.ChopX = 36.78
line10.ChopY = 30.4547
line10.timing_system.channels.hsc.delay = 5.2136e-08
tolerance = [0.002, 0.002, 2.8e-09, 2.8e-09]
command_row = 9
widths = [100, 100, 100]
show_in_list = True
show_stop_button = True
command_rows = [11]
row_height = 21
names = ['X', 'Y', 'phase', 'p0_shift']
line7.timing_system.p0_shift = -1.84e-06
line8.timing_system.p0_shift = 0.0
line9.timing_system.p0_shift = -2.7871134923018455e-13
line6.timing_system.p0_shift = 0.0
line5.timing_system.p0_shift = 0.0
line4.timing_system.p0_shift = 0.0
line3.timing_system.p0_shift = -2.7871134923018455e-13
line2.timing_system.p0_shift = 0.0
line1.timing_system.p0_shift = -2.7871134923018455e-13
line0.timing_system.p0_shift = 0.0
line10.timing_system.p0_shift = 0.0
line11.ChopX = 36.78
line11.updated = '2019-06-01 08:36:18'
line11.ChopY = 30.9071
line11.timing_system.channels.hsc.delay = 1.9170000000000002e-08
line11.timing_system.p0_shift = -2.7871134923018455e-13
line11.description = 'S-7' | 33.5 | 94 | 0.737244 | line0.timing_system.channels.hsc.delay = 4.97e-06
line0.Phase [s] = 5.4527e-06
line0.ChopX = 36.78
line0.ChopY = 30.11
line0.description = 'S-1t'
line0.updated = '2019-05-30 14:18:48'
line1.timing_system.channels.hsc.delay = 0.0
line1.ChopX = 36.78
line1.ChopY = 31.136
line1.description = 'S-1'
line1.updated = '2019-05-30 14:25:48'
line2.timing_system.channels.hsc.delay = 8.232e-09
line2.ChopX = 36.78
line2.ChopY = 31.0579
line2.description = 'S-3'
line2.updated = '2019-05-30 14:28:12'
line3.timing_system.channels.hsc.delay = 1.372e-08
line3.ChopX = 36.78
line3.ChopY = 30.982499999999998
line3.description = 'S-5'
line3.updated = '2019-05-30 14:28:12'
line4.timing_system.channels.hsc.delay = 3.0184e-08
line4.ChopX = 36.78
line4.ChopY = 30.7563
line4.description = 'S-11'
line4.updated = '2019-05-30 14:28:12'
line5.timing_system.channels.hsc.delay = 6.86e-08
line5.ChopX = 36.78
line5.ChopY = 30.2285
line5.description = 'S-25'
line5.updated = '2019-05-30 14:28:12'
line6.timing_system.channels.hsc.delay = 0.0
line6.ChopX = 36.78
line6.ChopY = 30.555
line6.description = 'H-1'
line6.updated = '2019-05-30 14:19:34'
line7.timing_system.channels.hsc.delay = 0.0
line7.ChopX = 36.78
line7.ChopY = 30.555
line7.description = 'H-56'
line7.updated = '2019-05-30 14:17:51'
line8.timing_system.channels.hsc.delay = 0.0
line8.ChopX = 27.67
line8.ChopY = 30.925
line8.description = 'Bypass'
line8.updated = '2019-05-30 14:17:51'
motor_names = ['ChopX', 'ChopY', 'timing_system.channels.hsc.delay', 'timing_system.p0_shift']
motor_labels = ['X', 'Y', 'Phase', 'P0 Shift']
nrows = 12
formats = ['%+6.4f', '%+6.4f', 'time', 'time']
title = 'High-Speed Julich Chopper Modes'
line9.description = 'S-15'
line9.updated = '2019-05-30 14:28:12'
line9.ChopX = 36.78
line9.ChopY = 30.6055
line9.timing_system.channels.hsc.delay = 4.116e-08
line10.description = 'S-19'
line10.updated = '2019-05-30 14:28:12'
line10.ChopX = 36.78
line10.ChopY = 30.4547
line10.timing_system.channels.hsc.delay = 5.2136e-08
tolerance = [0.002, 0.002, 2.8e-09, 2.8e-09]
command_row = 9
widths = [100, 100, 100]
show_in_list = True
show_stop_button = True
command_rows = [11]
row_height = 21
names = ['X', 'Y', 'phase', 'p0_shift']
line7.timing_system.p0_shift = -1.84e-06
line8.timing_system.p0_shift = 0.0
line9.timing_system.p0_shift = -2.7871134923018455e-13
line6.timing_system.p0_shift = 0.0
line5.timing_system.p0_shift = 0.0
line4.timing_system.p0_shift = 0.0
line3.timing_system.p0_shift = -2.7871134923018455e-13
line2.timing_system.p0_shift = 0.0
line1.timing_system.p0_shift = -2.7871134923018455e-13
line0.timing_system.p0_shift = 0.0
line10.timing_system.p0_shift = 0.0
line11.ChopX = 36.78
line11.updated = '2019-06-01 08:36:18'
line11.ChopY = 30.9071
line11.timing_system.channels.hsc.delay = 1.9170000000000002e-08
line11.timing_system.p0_shift = -2.7871134923018455e-13
line11.description = 'S-7' | 0 | 0 | 0 |
0f65b48452a107e448f515377b6c8ebc0545e05b | 1,520 | py | Python | example/classifier_shogun.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 12 | 2016-04-11T04:49:08.000Z | 2019-02-08T01:43:46.000Z | example/classifier_shogun.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 138 | 2016-04-11T05:57:48.000Z | 2020-09-26T03:09:31.000Z | example/classifier_shogun.py | vishalbelsare/jubakit | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | [
"MIT"
] | 10 | 2016-04-11T03:18:45.000Z | 2018-04-14T10:11:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Using Classifier and String Features
========================================
This is a famous `shogun` classifier example that predicts family name
of Shogun from his first name.
"""
from jubakit.classifier import Classifier, Schema, Dataset, Config
from jubakit.loader.csv import CSVLoader
# Load the shogun dataset.
train_loader = CSVLoader('shogun.train.csv')
test_loader = CSVLoader('shogun.test.csv')
# Define a Schema that defines types for each columns of the CSV file.
schema = Schema({
    'family_name': Schema.LABEL,
    'first_name': Schema.STRING,
})
# Create a Dataset.  Training data is shuffled so records are not fed to
# the learner in file order.
train_dataset = Dataset(train_loader, schema).shuffle()
test_dataset = Dataset(test_loader, schema)
# Create a Classifier Service.
# method='PA' selects the learning algorithm (presumably Passive-Aggressive
# -- confirm against the Jubatus docs); first names are featurized as
# character unigrams with binary sample/global weights.
cfg = Config(
    method = 'PA',
    converter = {
        'string_rules': [{'key': 'first_name', 'type': 'unigram', 'sample_weight': 'bin', 'global_weight': 'bin'}]
    }
)
classifier = Classifier.run(cfg)
# Train the classifier.
for _ in classifier.train(train_dataset): pass
# Classify using the classifier.
for (idx, label, result) in classifier.classify(test_dataset):
    true_family_name = label
    pred_family_name = result[0][0]  # the predicted label is taken from result[0][0]
    first_name = test_dataset.get(idx)['first_name']
    print("{0} {1} ({2})".format(
        pred_family_name,
        first_name,
        'correct!' if pred_family_name == true_family_name else 'incorrect'
    ))
# Stop the classifier.
classifier.stop()
| 27.142857 | 110 | 0.710526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Using Classifier and String Features
========================================
This is a famous `shogun` classifier example that predicts family name
of Shogun from his first name.
"""
from jubakit.classifier import Classifier, Schema, Dataset, Config
from jubakit.loader.csv import CSVLoader
# Load the shogun dataset.
train_loader = CSVLoader('shogun.train.csv')
test_loader = CSVLoader('shogun.test.csv')
# Define a Schema that defines types for each columns of the CSV file.
schema = Schema({
'family_name': Schema.LABEL,
'first_name': Schema.STRING,
})
# Create a Dataset.
train_dataset = Dataset(train_loader, schema).shuffle()
test_dataset = Dataset(test_loader, schema)
# Create a Classifier Service.
cfg = Config(
method = 'PA',
converter = {
'string_rules': [{'key': 'first_name', 'type': 'unigram', 'sample_weight': 'bin', 'global_weight': 'bin'}]
}
)
classifier = Classifier.run(cfg)
# Train the classifier.
for _ in classifier.train(train_dataset): pass
# Classify using the classifier.
for (idx, label, result) in classifier.classify(test_dataset):
true_family_name = label
pred_family_name = result[0][0]
first_name = test_dataset.get(idx)['first_name']
print("{0} {1} ({2})".format(
pred_family_name,
first_name,
'correct!' if pred_family_name == true_family_name else 'incorrect'
))
# Stop the classifier.
classifier.stop()
| 0 | 0 | 0 |
e48653b6767ad90aa0abfd6ebe35c13ae15166f0 | 1,024 | py | Python | labs/lab-10/fileScale.py | schnur/oss-repo-template | d9e3ea7cae43dd1dd1ff7acef8b1249f3a95a848 | [
"MIT"
] | null | null | null | labs/lab-10/fileScale.py | schnur/oss-repo-template | d9e3ea7cae43dd1dd1ff7acef8b1249f3a95a848 | [
"MIT"
] | null | null | null | labs/lab-10/fileScale.py | schnur/oss-repo-template | d9e3ea7cae43dd1dd1ff7acef8b1249f3a95a848 | [
"MIT"
] | null | null | null | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from PIL import ImageOps
'''def turnWhite(imageName, newName):
img = Image.open(imageName+'.png')
img = img.convert("RGBA")
datas = img.getdata()
newData = []
for item in datas:
if item[3]!=0:
newData.append((255, 255, 255, 255))
else:
newData.append(item)
img.putdata(newData)
img.save(newName+".png", "PNG") '''
img = Image.open("shoe1.jpg")
img = ImageOps.grayscale(img)
np_im = np.array(img)
print(np_im.shape)
np_im = (np_im - np.min(np_im))/np.ptp(np_im)
#print(np_im.shape)
#datas=img.getdata()
#print(datas)
#newData = []
#for item in datas:
#newData.append((item[0]/255,item[1]/255,item[2]/255,item[3]))
#img.putdata(newData)
plt.imshow(np_im)
plt.show()
#img.save("new"+".jpg", "JPEG")
#new_im = Image.fromarray(np_im)
#new_im.save("new.jpg")
img.close()
#np_im = np.array(im)
#print(np_im)
#new_arr = ((np_im + 0) * (1/1) * 255).astype('uint8')
#print(new_arr) | 22.26087 | 66 | 0.636719 | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from PIL import ImageOps
'''def turnWhite(imageName, newName):
img = Image.open(imageName+'.png')
img = img.convert("RGBA")
datas = img.getdata()
newData = []
for item in datas:
if item[3]!=0:
newData.append((255, 255, 255, 255))
else:
newData.append(item)
img.putdata(newData)
img.save(newName+".png", "PNG") '''
img = Image.open("shoe1.jpg")
img = ImageOps.grayscale(img)
np_im = np.array(img)
print(np_im.shape)
np_im = (np_im - np.min(np_im))/np.ptp(np_im)
#print(np_im.shape)
#datas=img.getdata()
#print(datas)
#newData = []
#for item in datas:
#newData.append((item[0]/255,item[1]/255,item[2]/255,item[3]))
#img.putdata(newData)
plt.imshow(np_im)
plt.show()
#img.save("new"+".jpg", "JPEG")
#new_im = Image.fromarray(np_im)
#new_im.save("new.jpg")
img.close()
#np_im = np.array(im)
#print(np_im)
#new_arr = ((np_im + 0) * (1/1) * 255).astype('uint8')
#print(new_arr) | 0 | 0 | 0 |
a636144d3d5c2b26863dc0a67f5fc0f3e314319a | 1,813 | py | Python | hemp/web/services/nso.py | nobuhikosekiya/sbx_multi_ios | 9a8e540617d46fd98f466d89e1f9af4f8a1797aa | [
"MIT"
] | 64 | 2018-08-18T01:13:18.000Z | 2021-12-09T17:46:35.000Z | hemp/web/services/nso.py | nobuhikosekiya/sbx_multi_ios | 9a8e540617d46fd98f466d89e1f9af4f8a1797aa | [
"MIT"
] | 45 | 2018-08-16T21:26:11.000Z | 2021-12-13T19:58:20.000Z | hemp/web/services/nso.py | nobuhikosekiya/sbx_multi_ios | 9a8e540617d46fd98f466d89e1f9af4f8a1797aa | [
"MIT"
] | 37 | 2018-09-23T04:09:53.000Z | 2021-11-11T16:39:37.000Z | import os
import requests
from base64 import b64encode
from flask import render_template
BASE_URL = os.getenv("NSO_URL", "http://localhost:8080")
API_ROOT = BASE_URL + '/api/running'
NSO_USERNAME = os.getenv("NSO_USERNAME", "admin")
NSO_PASSWORD = os.getenv("NSO_PASSWORD", "admin")
HEADERS = {
'Content-Type': "application/vnd.yang.data+json",
'authorization': "Basic {}".format(b64encode(b':'.join((NSO_USERNAME,
NSO_PASSWORD)
)
).strip()
),
'accept': "application/vnd.yang.collection+json"
}
def send_post(url):
"""
used to pass through NSO requests
"""
HEADERS['accept'] = 'application/vnd.yang.data+json'
if not url.startswith('/'):
url = "/{}".format(url)
url = BASE_URL + url
resp = requests.post(url, headers=HEADERS)
return resp
| 32.375 | 73 | 0.599559 | import os
import requests
from base64 import b64encode
from flask import render_template
BASE_URL = os.getenv("NSO_URL", "http://localhost:8080")
API_ROOT = BASE_URL + '/api/running'
NSO_USERNAME = os.getenv("NSO_USERNAME", "admin")
NSO_PASSWORD = os.getenv("NSO_PASSWORD", "admin")
HEADERS = {
'Content-Type': "application/vnd.yang.data+json",
'authorization': "Basic {}".format(b64encode(b':'.join((NSO_USERNAME,
NSO_PASSWORD)
)
).strip()
),
'accept': "application/vnd.yang.collection+json"
}
def send_post(url):
"""
used to pass through NSO requests
"""
HEADERS['accept'] = 'application/vnd.yang.data+json'
if not url.startswith('/'):
url = "/{}".format(url)
url = BASE_URL + url
resp = requests.post(url, headers=HEADERS)
return resp
def get_configured_vpns():
HEADERS['accept'] = 'application/vnd.yang.collection+json'
resp = requests.get(API_ROOT + "/vpn", headers=HEADERS)
print resp.text
data = resp.json()
return data['collection']['vpn:vpn']
def add_vpn(**kwargs):
payload = render_template('xml/new-vpn.xml', **kwargs)
xml_headers = HEADERS
xml_headers['Content-Type'] = "application/vnd.yang.data+xml"
xml_headers['Accept'] = "application/vnd.yang.data+xml"
resp = requests.post(API_ROOT, data=payload, headers=xml_headers)
return (resp, payload)
def get_vpn_details(partner_name):
HEADERS['accept'] = 'application/vnd.yang.data+json'
url = API_ROOT + "/vpn/{}".format(partner_name)
resp = requests.get(url, headers=HEADERS)
data = resp.json()
return data['vpn:vpn']
| 740 | 0 | 69 |