text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
import random
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import parse_qsl
from libcloud.utils.py3 import u
from libcloud.utils.py3 import unittest2_required
# Use the unittest2 backport on interpreters whose stdlib unittest is too old.
if unittest2_required:
    import unittest2 as unittest
else:
    import unittest

# Default headers attached to mocked XML responses in driver tests.
XML_HEADERS = {'content-type': 'application/xml'}
class LibcloudTestCase(unittest.TestCase):
    """
    Base test case that records which URLs were visited and which mock
    methods were executed by a MockHttp-style fixture, so tests can make
    assertions about the HTTP traffic they triggered.
    """

    def __init__(self, *args, **kwargs):
        self._visited_urls = []
        self._executed_mock_methods = []
        super(LibcloudTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        # Start every test with clean bookkeeping.
        self._visited_urls = []
        self._executed_mock_methods = []

    def _add_visited_url(self, url):
        self._visited_urls.append(url)

    def _add_executed_mock_method(self, method_name):
        self._executed_mock_methods.append(method_name)

    def assertExecutedMethodCount(self, expected):
        actual = len(self._executed_mock_methods)
        message = ('expected %d, but %d mock methods were executed'
                   % (expected, actual))
        self.assertEqual(actual, expected, message)
class multipleresponse(object):
    """
    A decorator that allows MockHttp objects to return multi responses.

    Each call to the decorated function returns the next element of the
    sequence the wrapped function produces, advancing an internal counter.
    """
    count = 0
    func = None

    def __init__(self, f):
        self.func = f

    def __call__(self, *args, **kwargs):
        # The wrapped function receives its own class object as the first
        # positional argument (mirrors an unbound-method style call).
        responses = self.func(self.func.__class__, *args, **kwargs)
        result = responses[self.count]
        self.count += 1
        return result
class MockResponse(object):
    """
    A mock HTTPResponse.

    Mimics the subset of ``httplib.HTTPResponse`` that libcloud touches:
    ``read``, iteration, ``getheader``/``getheaders`` and the
    ``status``/``reason``/``version`` attributes.
    """
    # Class-level defaults. NOTE(review): `headers` is a shared mutable dict;
    # __init__ leaves self.headers pointing at it when no headers are given,
    # so callers must not mutate it in place.
    headers = {}
    body = StringIO()
    status = 0
    reason = ''
    version = 11

    def __init__(self, status, body=None, headers=None, reason=None):
        self.status = status
        # Wrap the body in a file-like object so read() behaves like a
        # real HTTP response body.
        self.body = StringIO(u(body)) if body else StringIO()
        self.headers = headers or self.headers
        self.reason = reason or self.reason

    def read(self, *args, **kwargs):
        return self.body.read(*args, **kwargs)

    def next(self):
        # Python 2.5/2.6 file-like objects expose .next(); newer versions
        # use the next() builtin.
        if sys.version_info >= (2, 5) and sys.version_info <= (2, 6):
            return self.body.next()
        else:
            return next(self.body)

    def __next__(self):
        return self.next()

    def getheader(self, name, *args, **kwargs):
        return self.headers.get(name, *args, **kwargs)

    def getheaders(self):
        return list(self.headers.items())

    def msg(self):
        # BUG FIX: the original `raise NotImplemented` raised a TypeError at
        # call time (NotImplemented is a constant, not an exception class);
        # raise the proper exception type instead.
        raise NotImplementedError
class BaseMockHttpObject(object):
    """Shared helper that maps a request path to a mock method name."""

    def _get_method_name(self, type, use_param, qs, path):
        # Strip any query string, then turn the path into a valid Python
        # identifier: '/', '.' and '-' all become underscores.
        meth_name = path.split('?')[0]
        for char in ('/', '.', '-'):
            meth_name = meth_name.replace(char, '_')
        if type:
            # NOTE: mirrors the original behavior - the suffix comes from
            # self.type, not the `type` argument (callers pass type=self.type).
            meth_name = '%s_%s' % (meth_name, self.type)
        if use_param and use_param in qs:
            param = qs[use_param][0]
            for char in ('.', '-'):
                param = param.replace(char, '_')
            meth_name = '%s_%s' % (meth_name, param)
        # An empty path with no suffixes dispatches to the 'root' handler.
        return meth_name if meth_name else 'root'
class MockHttp(BaseMockHttpObject):
    """
    A mock HTTP client/server suitable for testing purposes. This replaces
    `HTTPConnection` by implementing its API and returning a mock response.
    Define methods by request path, replacing slashes (/) with underscores (_).
    Each of these mock methods should return a tuple of:
    (int status, str body, dict headers, str reason)
    >>> mock = MockHttp('localhost', 8080)
    >>> mock.request('GET', '/example/')
    >>> response = mock.getresponse()
    >>> response.body.read()
    'Hello World!'
    >>> response.status
    200
    >>> response.getheaders()
    [('X-Foo', 'libcloud')]
    >>> MockHttp.type = 'fail'
    >>> mock.request('GET', '/example/')
    >>> response = mock.getresponse()
    >>> response.body.read()
    'Oh Noes!'
    >>> response.status
    403
    >>> response.getheaders()
    [('X-Foo', 'fail')]
    """
    # Class used to build the mocked response object for each request.
    responseCls = MockResponse
    host = None
    port = None
    # Last response built by request(); returned by getresponse().
    response = None
    # Optional suffix appended to the dispatched method name (see doctest).
    type = None
    use_param = None # will use this param to namespace the request function
    test = None # TestCase instance which is using this mock
    proxy_url = None

    def __init__(self, host, port, *args, **kwargs):
        self.host = host
        self.port = port

    def request(self, method, url, body=None, headers=None, raw=False):
        """Dispatch *url* to the mock method derived from its path and store
        the resulting response for a later getresponse() call."""
        # Find a method we can use for this request
        parsed = urlparse.urlparse(url)
        scheme, netloc, path, params, query, fragment = parsed
        qs = parse_qs(query)
        # Trailing slashes do not change which handler is selected.
        if path.endswith('/'):
            path = path[:-1]
        meth_name = self._get_method_name(type=self.type,
                                          use_param=self.use_param,
                                          qs=qs, path=path)
        # '%' can appear via URL-encoded paths; normalize to '_' as well.
        meth = getattr(self, meth_name.replace('%', '_'))

        # Record traffic on the owning test case, when one is attached.
        if self.test and isinstance(self.test, LibcloudTestCase):
            self.test._add_visited_url(url=url)
            self.test._add_executed_mock_method(method_name=meth_name)

        status, body, headers, reason = meth(method, url, body, headers)
        self.response = self.responseCls(status, body, headers, reason)

    def getresponse(self):
        return self.response

    def connect(self):
        """
        Can't think of anything to mock here.
        """
        pass

    def close(self):
        pass

    def set_http_proxy(self, proxy_url):
        self.proxy_url = proxy_url

    # Mock request/response example
    def _example(self, method, url, body, headers):
        """
        Return a simple message and header, regardless of input.
        """
        return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
                httplib.responses[httplib.OK])

    def _example_fail(self, method, url, body, headers):
        # Companion handler exercised when MockHttp.type == 'fail'.
        return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'},
                httplib.responses[httplib.FORBIDDEN])
class MockHttpTestCase(MockHttp, unittest.TestCase):
    # Same as the MockHttp class, but you can also use assertions in the
    # classes which inherit from this one.

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self)
        host = kwargs.get('host', None)
        port = kwargs.get('port', None)
        if host and port:
            MockHttp.__init__(self, *args, **kwargs)

    def runTest(self):
        # No-op so the class is instantiable as a standalone TestCase.
        pass

    def assertUrlContainsQueryParams(self, url, expected_params, strict=False):
        """
        Assert that provided url contains provided query parameters.
        :param url: URL to assert.
        :type url: ``str``
        :param expected_params: Dictionary of expected query parameters.
        :type expected_params: ``dict``
        :param strict: Assert that provided url contains only expected_params.
                       (defaults to ``False``)
        :type strict: ``bool``
        """
        # Keep only the query string; a url without '?' is treated as being
        # entirely a query string, matching the original find()-based logic.
        query = url.split('?', 1)[-1]
        params = dict(parse_qsl(query))
        if strict:
            self.assertDictEqual(params, expected_params)
        else:
            for key in expected_params:
                self.assertEqual(params[key], expected_params[key])
class StorageMockHttp(MockHttp):
    # Adds the no-op raw-upload interface (putrequest / putheader /
    # endheaders / send) that storage drivers expect a connection to expose.

    def putrequest(self, method, action, skip_host=0, skip_accept_encoding=0):
        """No-op; storage tests do not issue a real raw request."""

    def putheader(self, key, value):
        """No-op."""

    def endheaders(self):
        """No-op."""

    def send(self, data):
        """No-op."""
class MockRawResponse(BaseMockHttpObject):
    """
    Mock RawResponse object suitable for testing.

    Resolves its response lazily from the attached connection's action the
    first time any of the response/status/headers/reason properties is read.
    """
    type = None
    responseCls = MockResponse

    def __init__(self, connection):
        super(MockRawResponse, self).__init__()
        self._data = []
        self._current_item = 0
        self._status = None
        self._response = None
        self._headers = None
        self._reason = None
        self.connection = connection

    def next(self):
        # Serve queued data items one at a time (py2 iterator protocol).
        try:
            value = self._data[self._current_item]
        except IndexError:
            raise StopIteration
        self._current_item += 1
        return value

    def __next__(self):
        # py3 iterator protocol delegates to the py2 spelling above.
        return self.next()

    def _generate_random_data(self, size):
        # Build a string of random decimal digits at least `size` chars long.
        pieces = []
        generated = 0
        while generated < size:
            digit = str(random.randint(0, 9))
            pieces.append(digit)
            generated += len(digit)
        return ''.join(pieces)

    @property
    def response(self):
        return self._get_response_if_not_available()

    @property
    def status(self):
        self._get_response_if_not_available()
        return self._status

    @property
    def headers(self):
        self._get_response_if_not_available()
        return self._headers

    @property
    def reason(self):
        self._get_response_if_not_available()
        return self._reason

    def _get_response_if_not_available(self):
        """Resolve, cache and return the mock response for the connection."""
        if not self._response:
            name = self._get_method_name(type=self.type,
                                         use_param=False, qs=None,
                                         path=self.connection.action)
            handler = getattr(self, name.replace('%', '_'))
            result = handler(self.connection.method, None, None, None)
            self._status, self._body, self._headers, self._reason = result
            self._response = self.responseCls(self._status, self._body,
                                              self._headers, self._reason)
        return self._response
if __name__ == "__main__":
    # Run the doctests embedded in MockHttp's docstring.
    import doctest
    doctest.testmod()
| {
"content_hash": "35bc9823ab2b263da8b5ae6e573e163e",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 79,
"avg_line_length": 28.75147928994083,
"alnum_prop": 0.5795431158674624,
"repo_name": "niteoweb/libcloud",
"id": "76f5b0cbfc4af9e11032c1be4a1324d402bbd4ea",
"size": "10500",
"binary": false,
"copies": "4",
"ref": "refs/heads/niteoweb_internal_release",
"path": "libcloud/test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "5214020"
},
{
"name": "Shell",
"bytes": "13868"
}
],
"symlink_target": ""
} |
import arcpy, os, string, zipfile, shutil, PyPDF2, tablib
from arcpy import mapping
from PyPDF2 import PdfFileMerger, PdfFileReader
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.platypus import BaseDocTemplate, Paragraph, frames, Table, TableStyle, Frame, flowables, Flowable, PageTemplate
import datetime
now = datetime.datetime.now()
month = now.strftime("%B")  # e.g. "January"; not referenced below
suffixDate = now.strftime("%Y%m%d")  # run-date stamp embedded in the output folder name
arcpy.AddMessage(str(now))
#
#
#Variables:
#
#
#INPUT VARIABLES:
#variables listed and commented out for optional use as an arcgis tool
# cycle = arcpy.GetParameterAsText(0)
# dataYear = arcpy.GetParameterAsText(1)
# dbaseNAME = arcpy.GetParameterAsText(2)
# output = arcpy.GetParameterAsText(3)
cycle = "North"
dataYear = "2015"
dbaseNAME = "Connection to Comanche.sde"
output = "C:\\TxDOT\\County Road Inventory Mapbooks\\official\\2015\\QC_" + cycle + "_" + suffixDate
# for maximum performance efficiency and this script to run: copy the 'Resources' folder
# from the T drive from here: T:\DATAMGT\MAPPING\Mapping Products\County Road Inventory Mapbooks
#input the location of the 'Resources' folder on your local machine by populating this path variable
resourcesFolder = "C:\\TxDOT\\County Road Inventory Mapbooks"
#DEPENDENT VARIABLES
if not os.path.exists(output + os.sep + dataYear):
    os.makedirs(output + os.sep + dataYear)
outputDir = output + os.sep + dataYear
#create working geodatabase
arcpy.CreateFileGDB_management(outputDir, "Working_Data.gdb")
workspace = outputDir + os.sep + "Working_Data.gdb"
#SHAPEFILE/FC VARIABLES
if cycle == "North":
    CountyLayer = resourcesFolder + "\\Resources\\South and North County Shapefiles\\NorthCounties.shp"
elif cycle == "South":
    CountyLayer = resourcesFolder + "\\Resources\\South and North County Shapefiles\\SouthCounties.shp"
else:
    # NOTE(review): AddError does not halt the script; CountyLayer stays
    # undefined and the SearchCursor below would raise NameError -- confirm
    # whether a raise / sys.exit() was intended here.
    arcpy.AddError("You must use ether 'North' or 'South' for your cycle option")
# Feature classes produced later by formatData()
inventory2 = workspace + os.sep + "Roadway_Events_Dissolved"
projectedRoads = workspace + os.sep + "Roadway_Events_Projected"
dissRoads = workspace + os.sep + "RoadLog_Dissolved"
#DIRECTORY VARIABLES
CoversFolder = outputDir + os.sep + "Covers"
LegendFolder = outputDir + os.sep + "Legend"
IndexFolder = outputDir + os.sep + "GridIndexes"
ShapefileFolder = outputDir + os.sep + "Shapefiles"
MapPagesFolder = outputDir + os.sep + "MapPages"
MapBooksFolder = outputDir + os.sep + "Combined_PDF"
RoadLogFolder = outputDir + os.sep + "RoadLog"
PDFLogFolder = RoadLogFolder + os.sep + "PDF"
completedPackets = outputDir + os.sep + "_Completed_Packets"
descriptiveDirectory = completedPackets + os.sep + "_Descriptive_PDFs"
#
# Paths inside the local copy of the SDE data (filled by copyDataLocal()).
dataGDB = outputDir + os.sep + "Data_Copy.gdb"
GridLayer = dataGDB + os.sep + "County_Grids_22K"
subfiles = dataGDB + os.sep + "SUBFILES"
txdotRoadways = dataGDB + os.sep + "TXDOT_Roadways"
prevYear = str(int(dataYear) - 1)
pubRDS = dataGDB + os.sep + "TXDOT_RTE_RDBD_LN_" + prevYear + "_Q4"
#compile global county name and number lists
lowerNames = []
upperNames = []
countyNumbers = []
cursor = arcpy.SearchCursor(CountyLayer)
for row in cursor:
    lowerNames.append(str(row.CNTY_NM))
    upperNames.append(str(row.CNTY_NM).upper())
    countyNumbers.append(str(row.CNTY_NBR).replace('.0', "")) # adjusted to fix float problem
del cursor
arcpy.AddMessage("County names and numbers lists compiled.")
#create global county total mileage dictionary for use with report functions
countyTotals = {}
#
#
#
#Define the functions of the process
#
#
#
#copy data local
def copyDataLocal():
    """Copy the Comanche SDE source feature classes into a local file GDB.

    Creates Data_Copy.gdb under the output directory and copies each
    source dataset in the same order (with the same progress messages)
    as the original hand-written sequence.
    """
    arcpy.CreateFileGDB_management(outputDir, "Data_Copy.gdb")
    arcpy.AddMessage("Database created.")
    sdeRoot = "Database Connections\\" + dbaseNAME + "\\"
    admin = "TPP_GIS.APP_TPP_GIS_ADMIN."
    # (source path under the SDE connection, local name, message or None)
    # The Q4 outbound roadways copy (TXDOT_RTE_RDBD_LN_<prevYear>_Q4) is
    # intentionally omitted, as in the original.
    copyJobs = [
        (admin + "Map_Index_Grids\\" + admin + "County_Grids_22K",
         "County_Grids_22K", "County Grids copied."),
        (admin + "SUBFILES", "SUBFILES", "SUBFILES copied."),
        (admin + "Roadways\\" + admin + "TXDOT_Roadways",
         "TXDOT_Roadways", "TxDOT Roadways copied."),
        (admin + "County\\" + admin + "County",
         "Counties", "County Boundaries copied."),
        (admin + "City\\" + admin + "City", "Cities", "Cities copied."),
        (admin + "Roadways\\" + admin + "City_Streets",
         "Streets", "City Streets copied."),
        (admin + "City\\" + admin + "City_Points", "City_Points", None),
        (admin + "Water\\" + admin + "Dams", "Dam", None),
        (admin + "Water\\" + admin + "Water_Bodies", "Water_Bodies", None),
        (admin + "Water\\" + admin + "Streams", "Streams", "Streams copied."),
        (admin + "Park\\" + admin + "Public_Lands_2014",
         "Public_Lands", "Public Lands copied."),
    ]
    for sourcePath, localName, message in copyJobs:
        arcpy.Copy_management(sdeRoot + sourcePath, dataGDB + os.sep + localName)
        if message:
            arcpy.AddMessage(message)
    arcpy.AddMessage("Comanche data copied local.")
#
#1
#COVER
#
#
def createCovers():
    """Export a cover-page PDF for every county in the current cycle."""
    arcpy.AddMessage("Generating Covers...")
    os.makedirs(outputDir + os.sep + "Covers")
    mxd = arcpy.mapping.MapDocument(resourcesFolder + "\\Resources\\MXD\\CRI_Covers.mxd")
    frame = arcpy.mapping.ListDataFrames(mxd)[0]
    for countyName in lowerNames:
        # Stamp the county name and copyright year onto the layout.
        for element in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
            if element.name == "CountyName":
                element.text = countyName + " - " + dataYear
            if element.name == "Year":
                element.text = "Copyright " + dataYear + " TxDOT"
        arcpy.AddMessage(countyName + " Cover Complete.")
        arcpy.mapping.ExportToPDF(mxd, CoversFolder + os.sep + countyName)
    del mxd
    del frame
    arcpy.AddMessage("Covers Complete.")
#
#2
#LEGEND
#
#
def createLegend():
    """Export the single shared legend PDF for the current data year."""
    arcpy.AddMessage("Generating Legend...")
    os.makedirs(outputDir + os.sep + "Legend")
    priorYear = int(dataYear) - 1
    mxd = arcpy.mapping.MapDocument(resourcesFolder + "\\Resources\\MXD\\Legend.mxd")
    frame = arcpy.mapping.ListDataFrames(mxd)[0]
    for element in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
        if element.name == "Title":
            element.text = "County Road Inventory " + str(dataYear)
        if element.name == "Copyright":
            element.text = "Copyright " + str(dataYear) + " Texas Department of Transportation "
        if element.name == "Disclaimer1":
            element.text = str(priorYear) + "."
    arcpy.RefreshActiveView()
    arcpy.mapping.ExportToPDF(mxd, LegendFolder + os.sep + "Legend_" + str(dataYear) + ".pdf")
    del mxd
    del frame
    arcpy.AddMessage("Legend Complete.")
#
#3
#GRID INDEX
#
#
def createGridIndex():
    """Export a "<County> County Mapbook Index.pdf" for every county.

    Repoints the GridIndexUpdate.mxd layers at the local Data_Copy.gdb,
    then filters the 22K grid layer to one county at a time and exports
    the layout zoomed to that county's grid extent.
    """
    arcpy.AddMessage("Updating the Grid Indexes...")
    #make directory
    os.makedirs(outputDir + os.sep + "GridIndexes")
    map = arcpy.mapping.MapDocument(resourcesFolder + "\\Resources\\MXD\\GridIndexUpdate.mxd")
    dataFrame = arcpy.mapping.ListDataFrames(map)[0]
    # Swap each layer's data source from the SDE connection to the local copy.
    for lyr in arcpy.mapping.ListLayers(map):
        if lyr.name == "TPP_GIS.MCHAMB1.County_Grids_22K":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "County_Grids_22K")
        if lyr.name == "TPP_GIS.MCHAMB1.County":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Counties")
        if lyr.name == "TXDOT_Roadways":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "TXDOT_Roadways")
        if lyr.name == "City":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Cities")
    arcpy.RefreshActiveView()
    for i in lowerNames:
        county = i
        for lyr in arcpy.mapping.ListLayers(map):
            if lyr.name == "TPP_GIS.MCHAMB1.County_Grids_22K":
                # Restrict the grid layer to this county's cells only.
                lyr.definitionQuery = "CNTY_NM = '" + county + "'"
                arcpy.RefreshActiveView()
                # NOTE(review): no selection is created before this call;
                # presumably getSelectedExtent() falls back to the extent of
                # the definition-queried features -- confirm against arcpy docs.
                extent = lyr.getSelectedExtent()
        for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"):
            if textElement.name == "topYEAR":
                textElement.text = dataYear
            if textElement.name == "nayme":
                textElement.text = county + " County - Mapbook Index"
            if textElement.name == "bottomDate":
                textElement.text = now.strftime("%B") + " " + now.strftime("%d") + ", " + dataYear
            if textElement.name == "copyright":
                textElement.text = "Copyright " + dataYear
            if textElement.name == "finalDate":
                lastYear = int(dataYear) - 1
                textElement.text = str(lastYear) + "."
        dataFrame.extent = extent
        # Back the scale off by 5% so the county linework is not clipped.
        dataFrame.scale *= 1.05
        arcpy.RefreshActiveView()
        arcpy.mapping.ExportToPDF(map, IndexFolder + os.sep + county + " County Mapbook Index.pdf")
        arcpy.AddMessage(county + " County Mapbook Index.pdf")
    del map
    del dataFrame
    arcpy.AddMessage("Map Indexes Complete.")
#
#4
#FORMAT DATA
#prep data for county shapefile and road log creation
#
def formatData():
arcpy.AddMessage("Formating Data...")
#
arcpy.AddMessage("Database connection established. Routing roadways event layer.")
#route subfile events onto txdot roadways and create a shapefile for creating a shapefile for each county
tempRTEevents = "tempRTEevents"
# arcpy.MakeRouteEventLayer_lr(pubRDS, "RTE_ID", subfiles, "RTE_ID LINE BMP EMP", tempRTEevents)
arcpy.MakeRouteEventLayer_lr(txdotRoadways, "RTE_ID", subfiles, "RTE_ID LINE BMP EMP", tempRTEevents)
eventlayer = mapping.Layer(tempRTEevents)
eventlayer.definitionQuery = """ "SUBFILE" = 2 AND "HIGHWAY_STATUS" = 4 AND "ADMIN_SYSTEM" = 2 """
arcpy.AddMessage("Event layer created.")
arcpy.FeatureClassToFeatureClass_conversion(eventlayer, workspace, "Roadway_Events")
inventory = workspace + os.sep + "Roadway_Events"
arcpy.AddMessage("Event layer saved to the workspace database.")
#pull the full street names from txdot roadways
arcpy.AddMessage("Starting street name update")
#define a dictionary to use to compile the roadway names
dictNM = {}
#use the search cursor to collect the names and put them in the dictionary
cursor = arcpy.da.SearchCursor(txdotRoadways, ["RTE_ID", "FULL_ST_NM"], """RTE_CLASS = '2'""")
for row in cursor:
ID = row[0]
name = row[1]
dictNM[str(ID)] = str(name)
del cursor
arcpy.AddMessage("Names collected from TxDOT_Roadways")
#create a field in the inventory roads and apply the collected names from the dictionary
arcpy.AddField_management(inventory, "ST_NAME", "TEXT", "", "", 50)
arcpy.AddMessage("Field created")
cursor = arcpy.da.UpdateCursor(inventory, ["RTE_ID", "CONTROLSEC", "ST_NAME"])
for row in cursor:
ID = row[0]
pre = row[1]
CS = str(pre).split("A")[-1]
if ID in dictNM.keys():
if str(dictNM[ID]) is None or str(dictNM[ID]) == " ":
row[2] = "County Road " + CS
cursor.updateRow(row)
else:
row[2] = str(dictNM[ID])
cursor.updateRow(row)
else:
print "ID not in txdotRoadways but in variable 'inventory': " + ID
row[2] = "County Road " + CS
cursor.updateRow(row)
del cursor
arcpy.AddMessage("Street names applied.")
#make a copy of the routed roadways in the statewide projection for the road log process later
spatialRef = arcpy.Describe(CountyLayer).spatialReference
arcpy.Project_management(inventory, projectedRoads, spatialRef)
arcpy.AddMessage("Roadway events re-projected for the road log.")
#the next 4 groups of code have been added in a recent version of this script after successful runs
# revealed a need to dissolve rows in the shapefile for each county.
#add a unique flag field and populate it based on the attributes its row
arcpy.AddField_management(inventory, "unique", "TEXT", "", "", 250)
cursor = arcpy.da.UpdateCursor(inventory, ["COUNTY", "CONTROLSEC", "HIGHWAY_DESIGN", "SURFACE_TYPE", "NUMBER_OF_LANES", "unique"])
for row in cursor:
row[5] = str(row[0]) + str(row[1]) + str(row[2]) + str(row[3]) + str(row[4])
cursor.updateRow(row)
del cursor
arcpy.AddMessage("Unique flag field created and populated.")
#use the unique field to dissolve the roads. this removes multiple features within the final
# county shapefiles that have all the same attributes. This problem exists because of subfiles records
# with the same attributes amongst the fields used here but different attributes in fields not used
#inventory2 = workspace + os.sep + "Roadway_Events_Dissolved"
arcpy.Dissolve_management(inventory, inventory2, ["unique"], [["LEN_OF_SECTION","SUM"],["ST_NAME","FIRST"],["CONTROLSEC","FIRST"],["HIGHWAY_DESIGN","FIRST"],["SURFACE_TYPE","FIRST"],["COUNTY", "FIRST"],["NUMBER_OF_LANES","FIRST"]], "MULTI_PART")
arcpy.AddMessage("The routed events have been 'uniquely' dissolved.")
#add new fields to ensure the shapefiles have proper field names since esri won't let you just change a field name
arcpy.AddField_management(inventory2, "ROUTE", "TEXT", "", "", 10)
arcpy.AddField_management(inventory2, "ST_NAME", "TEXT", "", "", 50)
arcpy.AddField_management(inventory2, "LENGTH", "DOUBLE")
arcpy.AddField_management(inventory2, "SURFACE", "LONG")
arcpy.AddField_management(inventory2, "DESIGN", "LONG")
arcpy.AddField_management(inventory2, "LANES", "LONG")
arcpy.AddField_management(inventory2, "COMMENTS", "TEXT", "", "", 100)
arcpy.AddField_management(inventory2, "COUNTY", "LONG")
arcpy.AddMessage("Replacement fields have been created.")
#populate the new fields with the data from the dissolved fields with the ugly names
cursor = arcpy.da.UpdateCursor(inventory2, ["FIRST_CONTROLSEC", "SUM_LEN_OF_SECTION", "FIRST_SURFACE_TYPE", "FIRST_HIGHWAY_DESIGN", "FIRST_NUMBER_OF_LANES", "FIRST_ST_NAME", "FIRST_COUNTY", "ROUTE", "LENGTH", "SURFACE", "DESIGN", "LANES", "ST_NAME", "COUNTY"])
for row in cursor:
CS = row[0]
sumlen = row[1]
surface = row[2]
design = row[3]
lanes = row[4]
name = row[5]
county = row[6]
row[7] = CS
row[8] = sumlen
row[9] = surface
row[10] = design
row[11] = lanes
row[12] = name
row[13] = county
cursor.updateRow(row)
del cursor
arcpy.AddMessage("New Fields have been populated.")
#continue with the formatting data process. remove unwanted fields.
deleteFields = ["unique", "SUM_LEN_OF_SECTION", "FIRST_ST_NAME", "FIRST_CONTROLSEC", "FIRST_HIGHWAY_DESIGN", "FIRST_SURFACE_TYPE", "FIRST_COUNTY", "FIRST_NUMBER_OF_LANES"]
arcpy.DeleteField_management(inventory2, deleteFields)
arcpy.AddMessage("Fields reconfigured to match data dictionary.")
arcpy.AddMessage("Data Formatted.")
#
#5
#SHAPEFILES
#
#
#this function was copied from previous the years' map book script.
def createShapefiles():
    """Select each county's dissolved roads into its own shapefile."""
    arcpy.AddMessage("Creating Shapefiles...")
    os.makedirs(outputDir + os.sep + "Shapefiles")
    total = len(upperNames)
    arcpy.AddMessage("Found %s counties..." % total)
    # Walk county names and numbers in lockstep; the number drives the
    # definition query, the name drives the output file name.
    for idx, (countyName, countyNumber) in enumerate(zip(upperNames, countyNumbers)):
        shapeFileName = countyName + "_INVENTORY_" + dataYear + ".shp"
        shapeFilePath = ShapefileFolder + os.sep + shapeFileName
        shapeFileDefQ = "\"COUNTY\" = " + str(countyNumber)
        arcpy.Select_analysis(inventory2, shapeFilePath, shapeFileDefQ)
        arcpy.AddMessage("%s definition query: %s" % (shapeFileName, shapeFileDefQ))
        arcpy.AddMessage("%s of %s Exporting County %s" % (idx + 1, total, shapeFileName))
    arcpy.AddMessage("Shapefiles Complete.")
#
#6
#MAP PAGES
#
#
def createMapPages():
    """Export one PDF map page per 22K grid cell for the current cycle."""
    arcpy.AddMessage("Generating Map Pages...")
    #make directory
    os.makedirs(outputDir + os.sep + "MapPages")
    map = arcpy.mapping.MapDocument(resourcesFolder + "\\Resources\\MXD\\CountyRoadInventoryMaps.mxd")
    dataFrame = arcpy.mapping.ListDataFrames(map)[0]
    # Repoint every layer at the local data copies.
    for lyr in arcpy.mapping.ListLayers(map):
        if lyr.name == "Centerline":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "TXDOT_Roadways")
        if lyr.name == "CountyRoadsRouted":
            lyr.replaceDataSource(workspace, "FILEGDB_WORKSPACE", "Roadway_Events")
            lyr.visible = True
            arcpy.AddMessage("Routed Roadways layer replaced.")
        if lyr.name == "Streets":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Streets")
        if lyr.name == "TPP_GIS.MCHAMB1.County":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Counties")
        if lyr.name == "TPP_GIS.MCHAMB1.City_Points":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "City_Points")
        if lyr.name == "Dam":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Dam")
        if lyr.name == "TPP_GIS.MCHAMB1.Water_Bodies":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Water_Bodies")
        if lyr.name == "TPP_GIS.MCHAMB1.Streams":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Streams")
        if lyr.name == "TPP_GIS.MCHAMB1.Public_Lands":
            lyr.replaceDataSource(dataGDB, "FILEGDB_WORKSPACE", "Public_Lands")
    for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"):
        if textElement.name == "Year":
            textElement.text = dataYear
    arcpy.RefreshActiveView()
    # Walk the grid cells of this cycle ordered by county name.
    cursor = arcpy.SearchCursor(GridLayer, "CYCLE = '" + cycle + "' ", "", "CNTY_NM; MAP_ID; WEST; NORTH; EAST; SOUTH; SHAPE", "CNTY_NM A")
    for row in cursor:
        dataFrame.extent = row.shape.extent
        MapID = str(row.Map_ID)
        for textElement in arcpy.mapping.ListLayoutElements(map, "TEXT_ELEMENT"):
            if textElement.name == "PageNumber":
                textElement.text = " Page-" + MapID
            if textElement.name == "CountyName":
                textElement.text = row.CNTY_NM + " County"
            # Neighbor-page pointers; a stored 0 means "no neighbor", so the
            # label is blanked out after the initial assignment.
            # NOTE(review): East/West/North/South are assigned to .text
            # without str() -- presumably these field values are already text;
            # confirm, since numeric values could raise on assignment.
            if textElement.name == "East":
                textElement.text = row.East
                if row.East == 0:
                    textElement.text = " "
            if textElement.name == "West":
                textElement.text = row.West
                if row.West == 0:
                    textElement.text = " "
            if textElement.name == "North":
                textElement.text = row.North
                if row.North == 0:
                    textElement.text = " "
            if textElement.name == "South":
                textElement.text = row.South
                if row.South == 0:
                    textElement.text = " "
        arcpy.RefreshActiveView()
        arcpy.mapping.ExportToPDF(map, MapPagesFolder + os.sep + row.CNTY_NM + " " + MapID + ".pdf")
        arcpy.AddMessage(MapPagesFolder + os.sep + row.CNTY_NM + " " + MapID + ".pdf")
    del cursor
    del map
    del dataFrame
    arcpy.AddMessage("Map Pages Complete.")
#
#7
#COMBINE MAPBOOKS
#
#
def combineMapbooks():
    """Merge cover, index, legend and map pages into one PDF per county.

    BUG FIX: the original opened every source PDF via the py2 ``file()``
    builtin and never closed the handles, leaking one descriptor per page
    across the whole run. Handles are now tracked and closed (after
    ``merger.write``, since PdfFileReader reads lazily from them).
    """
    arcpy.AddMessage("Compiling Mapbooks for each county...")
    #make directory
    os.makedirs(outputDir + os.sep + "Combined_PDF")
    #compile a dictionary of the number of pages for each county
    pageDICT = {}
    cursor = arcpy.SearchCursor(CountyLayer)
    for row in cursor:
        currentCNTY = row.getValue("CNTY_U")
        numPages = row.getValue("Max_Pages")
        pageDICT[str(currentCNTY)] = str(numPages)
    del cursor
    arcpy.AddMessage("Number-of-pages dictionary compiled.")
    #merge cover + index + legend + map pages 1..Max_Pages for each county
    for eachCO in pageDICT.keys():
        arcpy.AddMessage(str(eachCO) + " has " + str(pageDICT[eachCO]) + " pages.")
        theGoal = pageDICT[eachCO]
        merger = PdfFileMerger()
        theCover = CoversFolder + os.sep + str(eachCO) + ".pdf"
        theIndex = IndexFolder + os.sep + str(eachCO) + " County Mapbook Index.pdf"
        theLegend = LegendFolder + os.sep + "Legend_" + dataYear + ".pdf"
        openHandles = []
        try:
            for frontMatter in (theCover, theIndex, theLegend):
                handle = open(frontMatter, 'rb')
                openHandles.append(handle)
                merger.append(PdfFileReader(handle))
            for x in range(1, int(theGoal) + 1):
                pagevalue = str(x)
                thePage = MapPagesFolder + os.sep + str(eachCO) + " " + pagevalue + ".pdf"
                handle = open(thePage, 'rb')
                openHandles.append(handle)
                merger.append(PdfFileReader(handle))
                arcpy.AddMessage(str(eachCO) + " page " + pagevalue + " of " + str(theGoal))
            theOutput = open(MapBooksFolder + os.sep + str(eachCO) + "_MAPBOOK_" + dataYear + ".pdf", "wb")
            try:
                merger.write(theOutput)
            finally:
                theOutput.close()
        finally:
            #release every source PDF, even if the merge failed part-way
            for handle in openHandles:
                handle.close()
        arcpy.AddMessage(str(eachCO) + " complete.")
    arcpy.AddMessage("Mapbooks Compiled.")
#
#8
#Road Logs/Report data prep
#
#
#
#C:\TxDOT\Scripts\CountyRoadInventoryMapBook\ROAD_LOG_INSTRUCTION\New folder\FINAL_How to create the County Road Update Summary_1.doc
#replicated the process described in the process above.
#
#Report prep: here we go...
#
def formatRoadLog():
arcpy.AddMessage("Generating Road Log...")
#make directory
os.makedirs(outputDir + os.sep + "RoadLog")
#projectedRoads = workspace + os.sep + "Roadway_Events_Projected"
#intersect the county boundaries, county grids, and routed roads
logRoads = workspace + os.sep + "RoadLog_Intersect"
arcpy.Intersect_analysis([projectedRoads,GridLayer], logRoads)
arcpy.AddMessage("Intersect Complete.")
#clean up the intersect of grids which overlap neighbor counties.
# recalculate the new segment lengths since the intersect broken the linework at the county and grid boundaries.
cursor = arcpy.da.UpdateCursor(logRoads, ["SHAPE@LENGTH", "BMP", "EMP", "LEN_OF_SECTION", "COUNTY", "CNTY_NBR"])
for row in cursor:
#recalculate the length for the cut up linework
newlen = row[0] * .000621371
newerlen = format(float(newlen), '.3f')
row[3] = newerlen
# newBMP = row[0].extent.MMin
# newEMP = row[0].extent.MMax
# newBMP = str(newBMP)
# newEMP = str(newEMP)
# numB = newBMP.split(".")[0]
# decB = newBMP.split(".")[1]
# keepB = decB[:3]
# newBMP2 = numB + "." + keepB
# newBMP2 = float(newBMP2)
# numE = newEMP.split(".")[0]
# decE = newEMP.split(".")[1]
# keepE = decE[:3]
# newEMP2 = numE + "." + keepE
# newEMP2 = float(newEMP2)
#
# row[1] = newBMP2
# row[2] = newEMP2
# row[3] = abs(newEMP2 - newBMP2)
cursor.updateRow(row)
#remove overlapping neighbor grids
#get the linework county number:
frst = row[4]
#get the county boundary county name:
frth = row[5]
#deletes the row if the linework county doesn't match the county boundary number
if frst != int(frth):
cursor.deleteRow()
else:
pass
del cursor
arcpy.AddMessage("Intersected roadways have been cleaned up.")
#
#this section is the VB script replacement
arcpy.AddMessage("Compiling page numbers")
#sort table properly and collect the numbers via a cursor
cursor = arcpy.SearchCursor(logRoads,"","","RTE_ID; MAP_ID","RTE_ID A; MAP_ID A")
current = ""
previous = ""
counter = 0
endAll = int(arcpy.GetCount_management(logRoads).getOutput(0))
beAll = endAll - 1
thesePages = []
dictionary = {}
#use the searchCursor to compile all the page numbers for each route ID into a list, and then
# use that list as the value with the route ID as the key in the dictionary
for row in cursor:
current = row.getValue("RTE_ID")
if counter == 0:
previous = current
thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',""))
counter += 1
elif previous == current and counter != 0 and counter != beAll:
if "," + str(row.getValue("MAP_ID")).replace('.0',"") not in thesePages:
thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',""))
counter += 1
else:
counter += 1
elif previous == current and counter == beAll:
if "," + str(row.getValue("MAP_ID")).replace('.0',"") not in thesePages:
thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',""))
thesePages[0] = str(thesePages[0]).replace(",", "")
concatPGS = ''.join(thesePages)
dictionary[str(previous)] = concatPGS
counter += 1
elif previous != current and counter == beAll:
thesePages[0] = str(thesePages[0]).replace(",", "")
concatPGS = ''.join(thesePages)
dictionary[str(previous)] = concatPGS
thesePages = []
previous = current
dictionary[str(previous)] = str(row.getValue("MAP_ID")).replace('.0',"")
counter += 1
else:
thesePages[0] = str(thesePages[0]).replace(",", "")
concatPGS = ''.join(thesePages)
dictionary[str(previous)] = concatPGS
thesePages = []
previous = current
thesePages.append("," + str(row.getValue("MAP_ID")).replace('.0',""))
counter += 1
del cursor
arcpy.AddMessage("The page numbers have been compiled into the dictionary.")
#summarize the attributes in to remove multiple subfiles with the same attributes of the fields used in the report
arcpy.AddField_management(logRoads, "unique", "TEXT", "", "", 250)
cursor = arcpy.da.UpdateCursor(logRoads, ["CNTY_NM", "CONTROLSEC", "HIGHWAY_DESIGN", "SURFACE_TYPE", "NUMBER_OF_LANES", "unique"])
for row in cursor:
NAM = row[0]
CS = row[1]
HD = row[2]
ST = row[3]
NL = row[4]
row[5] = str(NAM) + str(CS) + str(HD) + str(ST) + str(NL)
cursor.updateRow(row)
del cursor
arcpy.AddMessage("Unique flag identifier has been created and populated.")
#Dissolve the road log lines and apply the page numbers
arcpy.Dissolve_management(logRoads, dissRoads, ["unique"], [["LEN_OF_SECTION","SUM"],["RTE_ID","FIRST"],["ST_NAME","FIRST"],["CNTY_NM","FIRST"],["CONTROLSEC","FIRST"],["HIGHWAY_DESIGN","FIRST"],["SURFACE_TYPE","FIRST"],["NUMBER_OF_LANES","FIRST"]])
arcpy.AddMessage("Road Log Linework dissolved.")
#add the page numbers to the summarized routes so that we have all the road log data ready for the report
arcpy.AddField_management(dissRoads, "MAP_ID", "TEXT", "", "", 150)
cursor = arcpy.da.UpdateCursor(dissRoads, ["FIRST_RTE_ID", "MAP_ID"])
for row in cursor:
rteID = row[0]
if rteID in dictionary.keys():
row[1] = str(dictionary[rteID])
else:
arcpy.AddError(str(rteID) + " has no page numbers in the dictionary!")
cursor.updateRow(row)
del cursor
arcpy.AddMessage("Page numbers applied into the new MAP_ID field.")
arcpy.AddMessage("Road Log Completed.")
#
#
#9
#Report generation
#
#
#
def createRoadLogReport():
    """Generate one "<COUNTY>_ROAD_SUMMARY_<year>.pdf" report per county.

    Lays out the rows of the dissolved road-log feature class (``dissRoads``)
    with ReportLab: a fixed page template (header, column captions, legend,
    page number) plus a flowing table of road records with page- and
    county-total mileage rows.

    NOTE(review): relies on module-level names defined earlier in the script
    (``arcpy``, ``os``, ``RoadLogFolder``, ``PDFLogFolder``, ``lowerNames``,
    ``dissRoads``, ``dataYear``, ``month``, ``countyTotals`` and the ReportLab
    names ``frames``, ``inch``, ``BaseDocTemplate``, ``PageTemplate``,
    ``Table``, ``TableStyle``) -- confirm against the top of the file.
    """
    arcpy.AddMessage("Starting PDF generation...")
    #make directory for the per-county PDFs
    os.makedirs(RoadLogFolder + os.sep + "PDF")
    #iterate through the list of county names to create a report for each county
    for Y in lowerNames:
        #import the dimensions for the report and create variable to determine the
        # maximum measurements for that page size
        from reportlab.lib.pagesizes import letter
        width, height = letter
        #create a variable for the 'flowable' data drawing area on the report. this variable draws
        # the location where the road summary data is inserted into the report template
        f = frames.Frame(.5*inch,inch, width-inch, 8.65*inch)
        #create the document
        doc = BaseDocTemplate(PDFLogFolder + os.sep + str(Y).upper() + "_ROAD_SUMMARY_" + str(dataYear) + ".pdf", pagesize=letter)
        #drawn the canvas/template of the report (static furniture on every page)
        def thecanvas(c, doc):
            from reportlab.lib.pagesizes import letter
            width, height = letter
            #the template/canvas is object oriented. this is a list of all the objects, where they are
            # to be drawn, and all the defining information which draws them
            #the objects are listed from the top of the page down to the bottom
            c.setFont("Helvetica-Bold",18)
            c.drawCentredString(width/2,height - .5*inch, str(Y))
            c.setFont("Helvetica",14)
            c.drawCentredString(width/2,height - .75*inch, "COUNTY ROAD SUMMARY")
            c.setFont("Times-Roman",12)
            c.drawCentredString(width/2,height - .93*inch, "Texas Department of Transportation")
            c.setFont("Times-Roman",8)
            c.drawCentredString(width/2,height - 1.07*inch, "Transportation Planning and Programming Division")
            # column captions for the road table
            c.setFont("Times-Bold",9)
            c.drawString(.57*inch,9.7*inch, "ROUTE")
            c.drawString(1.55*inch,9.7*inch, "ROAD NAME")
            c.drawString(3.25*inch,9.7*inch, "LENGTH")
            c.drawString(3.95*inch,9.7*inch, "DESIGN")
            c.drawString(4.7*inch,9.7*inch, "SURFACE")
            c.drawString(5.48*inch,9.7*inch, "LANES")
            c.drawString(6.25*inch,9.7*inch, "PAGE(S)")
            c.line(.5*inch,9.65*inch,width-.5*inch,9.65*inch)
            #the frame which contains the table and data will be here
            c.line(.5*inch,inch,width-.5*inch,inch)
            # footer: publication date, attribute legend, and page number
            c.setFont("Times-Bold",8)
            c.drawString(.5*inch,.88*inch, month + " " + str(dataYear))
            c.drawString(2.5*inch,.85*inch, "Key:")
            c.drawString(3*inch,.85*inch, "Design:")
            c.drawString(3*inch,.7*inch, "1 = One Way")
            c.drawString(3*inch,.55*inch, "2 = Two Way")
            c.drawString(3*inch,.4*inch, "3 = Boulevard (Blvd)")
            c.drawString(4.5*inch,.85*inch, "Surface Type:")
            c.drawString(4.5*inch,.7*inch, "10 = Natural")
            c.drawString(4.5*inch,.55*inch, "32 = All Weather")
            c.drawString(5.8*inch,.7*inch, "51 = Paved")
            c.drawString(5.8*inch,.55*inch, "61 = Concrete")
            pageNUM = c.getPageNumber()
            c.drawRightString(width-.5*inch,.88*inch, "Page " + str(pageNUM))
        #apply the canvas/template and the frame for the flowable road data to the document
        doc.addPageTemplates([PageTemplate(frames=[f],onPage=thecanvas)])
        #search the formatted road log feature class via a cursor. query the feature class for the county being
        # reported, sort, create a list of the attributes from each row, and apply the list to a list of all the data rows
        # first pass: count the rows so the final record can be recognized below
        # (finalCount ends up as row count minus one, i.e. the last row's index)
        cursor = arcpy.SearchCursor(dissRoads, "FIRST_CNTY_NM = '" + Y + "'", "", "", "FIRST_CONTROLSEC A; FIRST_ST_NAME A; SUM_LEN_OF_SECTION D; FIRST_HIGHWAY_DESIGN D; FIRST_SURFACE_TYPE D; FIRST_NUMBER_OF_LANES D")
        finalCount = -1
        for row in cursor:
            finalCount += 1
        del cursor
        # second pass: build the table rows, inserting a page-total row when a
        # page fills up and page+county totals after the last record
        cursor = arcpy.SearchCursor(dissRoads, "FIRST_CNTY_NM = '" + Y + "'", "", "", "FIRST_CONTROLSEC A; FIRST_ST_NAME A; SUM_LEN_OF_SECTION D; FIRST_HIGHWAY_DESIGN D; FIRST_SURFACE_TYPE D; FIRST_NUMBER_OF_LANES D")
        elements = []
        data = []
        counter = 1      # row slot on the current page (page break at 42)
        pageSum = 0      # running mileage for the current page
        countySum = 0    # running mileage for the whole county
        totalCount = 0   # index of the current record, compared to finalCount
        for row in cursor:
            if counter < 42 and totalCount != finalCount:
                # ordinary record in the middle of a page
                CS = str(row.getValue("FIRST_CONTROLSEC"))
                SN = str(row.getValue("FIRST_ST_NAME"))
                rounded = round(row.getValue("SUM_LEN_OF_SECTION"), 3)
                LN = str(rounded)
                pageSum += rounded
                countySum += rounded
                HD = str(row.getValue("FIRST_HIGHWAY_DESIGN"))
                ST = str(row.getValue("FIRST_SURFACE_TYPE"))
                LA = str(row.getValue("FIRST_NUMBER_OF_LANES"))
                PG = str(row.getValue("MAP_ID"))
                eachLine = [CS, SN, LN, HD, ST, LA, PG]
                data.append(eachLine)
                counter += 1
                totalCount += 1
            elif counter == 42 and totalCount != finalCount:
                # page is full: emit the page-total row, reset the page
                # counters, then emit the current record on the new page
                eachLine = ["", " PAGE TOTAL MILES:", str(pageSum), "", "", "", ""]
                data.append(eachLine)
                counter = 1
                pageSum = 0
                CS = str(row.getValue("FIRST_CONTROLSEC"))
                SN = str(row.getValue("FIRST_ST_NAME"))
                rounded = round(row.getValue("SUM_LEN_OF_SECTION"), 3)
                LN = str(rounded)
                pageSum += rounded
                countySum += rounded
                HD = str(row.getValue("FIRST_HIGHWAY_DESIGN"))
                ST = str(row.getValue("FIRST_SURFACE_TYPE"))
                LA = str(row.getValue("FIRST_NUMBER_OF_LANES"))
                PG = str(row.getValue("MAP_ID"))
                eachLine = [CS, SN, LN, HD, ST, LA, PG]
                data.append(eachLine)
                counter += 1
                totalCount += 1
            elif totalCount == finalCount:
                # last record for this county: emit it plus the page- and
                # county-total rows
                CS = str(row.getValue("FIRST_CONTROLSEC"))
                SN = str(row.getValue("FIRST_ST_NAME"))
                rounded = round(row.getValue("SUM_LEN_OF_SECTION"), 3)
                LN = str(rounded)
                pageSum += rounded
                countySum += rounded
                HD = str(row.getValue("FIRST_HIGHWAY_DESIGN"))
                ST = str(row.getValue("FIRST_SURFACE_TYPE"))
                LA = str(row.getValue("FIRST_NUMBER_OF_LANES"))
                PG = str(row.getValue("MAP_ID"))
                eachLine = [CS, SN, LN, HD, ST, LA, PG]
                data.append(eachLine)
                eachLine = ["", " PAGE TOTAL MILES:", str(pageSum), "", "", "", ""]
                data.append(eachLine)
                eachLine = ["", " COUNTY TOTAL MILES:", str(countySum), "", "", "", ""]
                data.append(eachLine)
                #add the county total to the countyTotal dictionary for an xls output at end
                countyTotals[str(Y)] = countySum
        #draw the table, apply the data list, and format/stylize it
        # NOTE(review): if a county query returns no rows, `data` is empty and
        # rowHeights below is an empty list -- confirm ReportLab tolerates that.
        t = Table(data, colWidths=[inch,1.75*inch,.8*inch,.75*inch,.75*inch,.65*inch,1.8*inch],rowHeights=[.2*inch]*len(data))
        t.setStyle(TableStyle([('FONTSIZE',(0,0),(6,(len(data)-1)),8),('ALIGN',(0,0),(6,(len(data)-1)),'LEFT'),]))
        #add the data object (in this case: the populated table of roads) to a list of 'flowable' objects
        elements.append(t)
        #use the 'flowable' objects list and build the document
        doc.build(elements)
        del cursor
        arcpy.AddMessage(str(Y) + " completed.")
    arcpy.AddMessage("PDF generation complete.")
#
#10
#Put together all the documents
#ZIP it UP
#
#
def compilePackets():
    """Assemble one ZIP packet per county containing the descriptive PDFs,
    the road summary, the mapbook, and the inventory shapefile pieces.

    NOTE(review): uses module-level names defined earlier in the script
    (``outputDir``, ``completedPackets``, ``resourcesFolder``,
    ``descriptiveDirectory``, ``PDFLogFolder``, ``MapBooksFolder``,
    ``ShapefileFolder``, ``upperNames``, ``dataYear``) -- confirm.
    """
    arcpy.AddMessage("Zipping up the packets for each county...")
    os.makedirs(outputDir + os.sep + "_Completed_Packets")
    os.makedirs(completedPackets + os.sep + "_Descriptive_PDFs")
    # Copy the annually updated documents which accompany every packet.
    annual_docs = ("COUNTY_ROAD_CRITERIA.pdf", "INSTRUCTIONS.pdf", "README_1ST.pdf")
    for doc_name in annual_docs:
        shutil.copyfile(resourcesFolder + "\\Resources\\Documents\\" + doc_name,
                        descriptiveDirectory + os.sep + doc_name)
    arcpy.AddMessage("Annual descriptive documents copied.")
    # Extensions making up each county's shapefile on disk.
    shapefile_extensions = [".dbf", ".prj", ".sbn", ".sbx", ".shp", ".shx"]
    arcpy.AddMessage("County and extension lists compiled.")
    county_count = len(upperNames)
    arcpy.AddMessage("Found %s counties..." % county_count)
    for ordinal, county in enumerate(upperNames, start=1):
        archive_path = completedPackets + os.sep + county + "_" + dataYear + ".zip"
        packet = zipfile.ZipFile(archive_path, "a", zipfile.ZIP_DEFLATED)
        arcpy.AddMessage("%s of %s - Zipping files for %s..." % (ordinal, county_count, county))
        # Descriptive documents shared by all counties.
        for doc_name in annual_docs:
            packet.write(descriptiveDirectory + os.sep + doc_name, doc_name)
        # Per-county road summary PDF.
        summary_pdf = county + "_ROAD_SUMMARY_" + dataYear + ".pdf"
        packet.write(PDFLogFolder + os.sep + summary_pdf, summary_pdf)
        # Per-county mapbook PDF.
        mapbook_pdf = county + "_MAPBOOK_" + dataYear + ".pdf"
        packet.write(MapBooksFolder + os.sep + mapbook_pdf, mapbook_pdf)
        # All pieces of the county's inventory shapefile.
        for extension in shapefile_extensions:
            piece = county + "_INVENTORY_" + dataYear + extension
            packet.write(ShapefileFolder + os.sep + piece, piece)
        arcpy.AddMessage("%s complete." % county)
        packet.close()
    arcpy.AddMessage("County packets zipped up and completed.")
#
#
#
#
#
#
def xlsTotals():
    """Write a TotalMileages.xls workbook summarizing per-county mileage.

    Reads the ``countyTotals`` dict populated by createRoadLogReport() and
    writes one row per county (name, raw miles, miles rounded to a whole
    number) via tablib.

    Fix: the original wrote the file with ``open(...).write(...)``, leaking
    the file handle (and on some platforms delaying the flush); the handle is
    now closed deterministically with a ``with`` block.
    """
    arcpy.AddMessage("Compiling mileage total xls for internal use...")
    data = tablib.Dataset(headers=["NAME", "MILES", "ROUNDED"])
    for name, miles in countyTotals.items():
        rounded = round(float(miles), 0)
        data.append((name, miles, rounded))
        arcpy.AddMessage(str(name) + " County: " + str(miles) + " miles rounded to: " + str(rounded))
    # Binary mode because tablib's .xls export is a bytestring.
    with open(outputDir + os.sep + "TotalMileages.xls", "wb") as xls_file:
        xls_file.write(data.xls)
    arcpy.AddMessage("Mileage total xls compiled.")
#
#
#
#
#
#
# ---------------------------------------------------------------------------
# Main driver: run the full mapbook build pipeline end-to-end.  Each step is
# a function defined earlier in this script.
# ---------------------------------------------------------------------------
arcpy.AddMessage("And away we go...")
copyDataLocal()        # stage source data locally
createCovers()         # county cover pages
createLegend()         # legend page
createGridIndex()      # grid index
formatData()           # prep/format roadway data
createShapefiles()     # per-county shapefile exports
createMapPages()       # data-driven map pages
combineMapbooks()      # merge pages into per-county mapbooks
formatRoadLog()        # build + clean the road log feature class
createRoadLogReport()  # per-county road summary PDFs
compilePackets()       # zip all deliverables per county
xlsTotals()            # mileage totals spreadsheet
arcpy.AddMessage("Phew...finally finished.")
now2 = datetime.datetime.now()
# `now` is captured near the top of the script, before the run starts
arcpy.AddMessage("Started at: " + str(now))
arcpy.AddMessage("Ended at: " + str(now2))
"content_hash": "78fcd0f5739ac9dae80c6ec0d00ac36f",
"timestamp": "",
"source": "github",
"line_count": 995,
"max_line_length": 264,
"avg_line_length": 41.414070351758795,
"alnum_prop": 0.6297231052976436,
"repo_name": "adambreznicky/python",
"id": "5fed73a1d2c074d5add1b23e83c4f3b1ed008f41",
"size": "41390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CountyRoadInventoryMapbook_20150618/Programs/_CountyRoadInventoryMapbook_v5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2498272"
},
{
"name": "Visual Basic",
"bytes": "40594"
}
],
"symlink_target": ""
} |
from django import template
from django.contrib import admin
from django.contrib.gis.geos import GEOSGeometry
from django.core.management import settings
register = template.Library()
@register.tag
def collect(parser, token):
    """Compile function for ``{% collect item [item ...] as varname %}``.

    Fix: Django invokes a registered tag's compile function as
    ``func(parser, token)``; the original accepted only ``token``, so every
    use of the tag raised TypeError.  The ``parser`` argument is accepted
    (and unused) to match the required signature.
    """
    bits = list(token.split_contents())
    if len(bits) > 3 and bits[-2] == 'as':
        varname = bits[-1]
        items = bits[1:-2]
        return CollectNode(items, varname)
    else:
        raise template.TemplateSyntaxError('%r expected format is "item [item ...] as varname"'
                                           % bits[0])
class CollectNode(template.Node):
    """Render-time node for {% collect %}: resolves each item into a list
    bound to ``varname`` in the rendering context."""

    def __init__(self, items, varname):
        # Fix: materialize the list up front.  Under Python 3, map() returns
        # a one-shot iterator, so the original node produced an empty list on
        # every render after the first (templates are compiled once and
        # rendered many times).
        self.items = [template.Variable(item) for item in items]
        self.varname = varname

    def render(self, context):
        context[self.varname] = [i.resolve(context) for i in self.items]
        return ''
class AssignNode(template.Node):
    """Render-time node for {% assign %}: stores one resolved filter
    expression into the rendering context under ``name``."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def render(self, context):
        resolved = self.value.resolve(context, True)
        context[self.name] = resolved
        return ''
def do_assign(parser, token):
    """
    Assign an expression to a variable in the current context.
    Syntax::
        {% assign [name] [value] %}
    Example::
        {% assign list entry.get_related %}
    """
    bits = token.contents.split()
    if len(bits) != 3:
        raise template.TemplateSyntaxError("'%s' tag takes two arguments" % bits[0])
    _tag_name, target, expression = bits
    return AssignNode(target, parser.compile_filter(expression))
# Fix: register the assign tag on the module's existing Library.  The
# original re-created `register = template.Library()` here, which shadowed
# the library declared at the top of the module and silently dropped the
# {% collect %} registration Django discovers through the module-level
# `register` attribute.
register.tag('assign', do_assign)
# Extra template tags for map
@register.filter()
def get_lat_lng(value, gc):
    """Return the latitude ('lat') or longitude ('lon') of a geometry value
    as a string.

    Fix: the original constructed ``GEOSGeometry(value)`` twice per call;
    the geometry is now parsed once.  Returns None for any other ``gc``
    value, matching the original implicit behavior.
    """
    coords = GEOSGeometry(value).coords
    lat = coords[1]
    lon = coords[0]
    if gc == 'lat':
        return "{}".format(lat)
    elif gc == 'lon':
        return "{}".format(lon)
@register.filter()
def filter_coords(value):
    """Return only the sites whose feature geometry has non-zero latitude
    and longitude (i.e. drop sites at the (0, 0) placeholder location).

    Fix: the original constructed ``GEOSGeometry(...)`` twice per site; the
    geometry is now parsed once per site.
    """
    sites = list()
    for site in value:
        coords = GEOSGeometry(site.featuregeometry).coords
        lat = coords[1]
        lon = coords[0]
        if lat != 0 and lon != 0:
            sites.append(site)
    return sites
@register.filter()
def get_title(value, short):
    """Look up an admin-site title or an ADMIN_SHORTCUTS entry title.

    Returns None for unknown ``value`` keys (implicit, as before).
    """
    if value == 'site_title':
        return admin.site.site_title
    if value == 'site_header':
        return admin.site.site_header
    if value == 'shortcut_title':
        return settings.ADMIN_SHORTCUTS[0]['shortcuts'][short]['title']
@register.filter()
def in_field(value):
    """Return the first space-delimited token of *value* ('' if it starts
    with a space or is empty)."""
    return value.partition(" ")[0]
# settings value
@register.simple_tag
def settings_value(name):
    """Expose a Django settings attribute to templates ('' when absent)."""
    default = ""
    return getattr(settings, name, default)
# https://stackoverflow.com/questions/771890/how-do-i-get-the-class-of-a-object-within-a-django-template
@register.filter(name='get_class')
def get_class(value):
    """Return the name of *value*'s class (for use in template logic)."""
    klass = value.__class__
    return klass.__name__
"content_hash": "83806c9cfdd629c92cd69b9fb3ac423e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 104,
"avg_line_length": 25.486486486486488,
"alnum_prop": 0.6299045599151644,
"repo_name": "miguelcleon/ODM2-Admin",
"id": "082c9fd6a56f933da8ca39ee0a448beb98a1395d",
"size": "2947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odm2admin/templatetags/admin_extras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23546"
},
{
"name": "Dockerfile",
"bytes": "1343"
},
{
"name": "HTML",
"bytes": "216455"
},
{
"name": "JavaScript",
"bytes": "491230"
},
{
"name": "PLpgSQL",
"bytes": "270807"
},
{
"name": "Python",
"bytes": "718550"
},
{
"name": "Shell",
"bytes": "2012"
}
],
"symlink_target": ""
} |
"""
Script to copy images to Wikimedia Commons, or to another wiki.
Syntax:
python imagetransfer.py pagename [-interwiki] [-tolang:xx] [-tofamily:yy]
Arguments:
-interwiki Look for images in pages found through interwiki links.
-keepname Keep the filename and do not verify description while replacing
-tolang:xx Copy the image to the wiki in language xx
-tofamily:yy Copy the image to a wiki in the family yy
-file:zz Upload many files from textfile: [[Image:xx]]
[[Image:yy]]
If pagename is an image description page, offers to copy the image to the
target site. If it is a normal page, it will offer to copy any of the images
used on that page, or if the -interwiki argument is used, any of the images
used on a page reachable via interwiki links.
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2004-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
import re
import sys
import pywikibot
import upload
from pywikibot import config, i18n, textlib
copy_message = {
'ar': u"هذه الصورة تم نقلها من %s. الوصف الأصلي كان:\r\n\r\n%s",
'en': u"This image was copied from %s. The original description was:\r\n\r\n%s",
'fa': u"تصویر از %s کپی شدهاست.توضیحات اصلی ان این بود::\r\n\r\n%s",
'de': u"Dieses Bild wurde von %s kopiert. Die dortige Beschreibung lautete:\r\n\r\n%s",
'fr': u"Cette image est copiée de %s. La description originale était:\r\n\r\n%s",
'he': u"תמונה זו הועתקה מהאתר %s. תיאור הקובץ המקורי היה:\r\n\r\n%s",
'hu': u"Kép másolása innen: %s. Az eredeti leírás:\r\n\r\n%s",
'ia': u"Iste imagine esseva copiate de %s. Le description original esseva:\r\n\r\n%s",
'it': u"Questa immagine è stata copiata da %s. La descrizione originale era:\r\n\r\n%s",
'kk': u"Бұл сурет %s дегеннен көшірілді. Түпнұсқа сипатттамасы былай болды:\r\n\r\n%s",
'lt': u"Šis paveikslėlis buvo įkeltas iš %s. Originalus aprašymas buvo:\r\n\r\n%s",
'nl': u"Afbeelding gekopieerd vanaf %s. De beschrijving daar was:\r\n\r\n%s",
'pl': u"Ten obraz został skopiowany z %s. Oryginalny opis to:\r\n\r\n%s",
'pt': u"Esta imagem foi copiada de %s. A descrição original foi:\r\n\r\n%s",
'ru': u"Изображение было скопировано с %s. Оригинальное описание содержало:\r\n\r\n%s",
'sr': u"Ова слика је копирана са %s. Оригинални опис је:\r\n\r\n%s",
'zh': u"本圖像從 %s 複製,原始說明資料:\r\n\r\n%s",
}
nowCommonsTemplate = {
'ar': u'{{subst:الآن_كومنز|Image:%s}}',
'de': u'{{NowCommons|%s}}',
'fr': u'{{Désormais sur Commons|%s}}',
'en': u'{{subst:ncd|Image:%s}}',
'fa': u'{{موجود در انبار|%s}}',
'he': u'{{גם בוויקישיתוף|%s}}',
'hu': u'{{azonnali-commons|Kép:%s}}',
'ia': u'{{OraInCommons|Imagine:%s}}',
'it': u'{{NowCommons unlink|%s}}',
'ja': u'{{NowCommons|Image:%s}}',
'kk': u'{{NowCommons|Image:%s}}',
'li': u'{{NowCommons|%s}}',
'lt': u'{{NowCommons|Image:%s}}',
'nds-nl': u'{{NoenCommons|File:%s}}',
'nl': u'{{NuCommons|Image:%s}}',
'pl': u'{{NowCommons|%s}}',
'pt': u'{{NowCommons|%s}}',
'sr': u'{{NowCommons|%s}}',
'zh': u'{{NowCommons|Image:%s}}',
}
nowCommonsMessage = {
'ar': u'الملف الآن متوفر في ويكيميديا كومنز.',
'de': u'Datei ist jetzt auf Wikimedia Commons verfügbar.',
'en': u'File is now available on Wikimedia Commons.',
'eo': u'Dosiero nun estas havebla en la Wikimedia-Komunejo.',
'fa': u'پرونده اکنون در انبار است',
'he': u'הקובץ זמין כעת בוויקישיתוף.',
'hu': u'A fájl most már elérhető a Wikimedia Commonson',
'ia': u'Le file es ora disponibile in Wikimedia Commons.',
'ja': u'ファイルはウィキメディア・コモンズにあります',
'it': u'L\'immagine è adesso disponibile su Wikimedia Commons.',
'kk': u'Файлды енді Wikimedia Ортаққорынан қатынауға болады.',
'lt': u'Failas įkeltas į Wikimedia Commons projektą.',
'nl': u'Dit bestand staat nu op [[w:nl:Wikimedia Commons|Wikimedia Commons]].',
'pl': u'Plik jest teraz dostępny na Wikimedia Commons.',
'pt': u'Arquivo está agora na Wikimedia Commons.',
'ru': u'[[ВП:КБУ#Ф8|Ф.8]]: доступно на [[Викисклад]]е',
'sr': u'Слика је сада доступна и на Викимедија Остави.',
'zh': u'檔案已存在於維基共享資源。',
}
# Translations for license templates.
# Must only be given when they are in fact different.
licenseTemplates = {
('wikipedia:de', 'commons:commons'): {
u'Bild-GFDL': u'GFDL',
u'Bild-GFDL-OpenGeoDB': u'GFDL-OpenGeoDB',
u'Bild-Innweb-Lizenz': u'Map-Austria-GNU',
u'Bild-PD': u'PD',
u'Bild-PD-alt': u'PD-old',
u'Bild-PD-Kunst': u'PD-Art',
u'Bild-PD-US': u'PD-USGov',
},
('wikipedia:fa', 'commons:commons'): {
u'مالکیت عمومی': u'PD',
u'مالکیت عمومی-خود': u'PD-self',
u'مجوز گنو': u'GFDL',
u'مجوز گنو-خود': u'GFDL-self',
u'نگاره قدیمی': u'PD-Iran',
u'نگاره نوشتاری': u'PD-textlogo',
u'نگاره عراقی': u'PD-Iraq',
u'نگاره بریتانیا': u'PD-UK',
u'نگاره هابل': u'PD-Hubble',
u'نگاره آمریکا': u'PD-US',
u'نگاره دولت آمریکا': u'PD-USGov',
u'کک-یاد-دو': u'Cc-by-2.0',
u'کک-یاد-حفظ-دونیم': u'Cc-by-sa-2.5',
u'کک-یاد-سه': u'Cc-by-3.0',
},
('wikipedia:fr', 'commons:commons'): {
u'Domaine public': u'PD'
},
('wikipedia:he', 'commons:commons'): {
u'שימוש חופשי': u'PD-self',
u'שימוש חופשי מוגן': u'Copyrighted free use',
u'שימוש חופשי מוגן בתנאי': u'Copyrighted free use provided that',
u'תמונה ישנה': u'PD-Israel',
u'ייחוס': u'Attribution',
u'לוגו ויקימדיה': u'Copyright by Wikimedia',
},
('wikipedia:hu', 'commons:commons'): {
u'Közkincs': u'PD',
u'Közkincs-régi': u'PD-old',
},
('wikipedia:pt', 'commons:commons'): {
u'Domínio público': u'PD',
},
}
class ImageTransferBot:

    """Image transfer bot.

    Iterates over a page generator, lets the operator pick images, and
    copies each selected image (file plus localized description) from its
    source wiki to ``targetSite`` via upload.UploadRobot.
    """

    def __init__(self, generator, targetSite=None, interwiki=False,
                 keep_name=False, ignore_warning=False):
        # generator: iterable of pywikibot.Page objects to scan for images.
        # interwiki: also collect images from interwiki-linked pages.
        # keep_name: keep the original filename and skip the description
        #            confirmation prompt on upload.
        self.generator = generator
        self.interwiki = interwiki
        self.targetSite = targetSite
        self.keep_name = keep_name
        self.ignore_warning = ignore_warning

    def transferImage(self, sourceImagePage):
        """
        Download image and its description, and upload it to another site.

        @return: the filename which was used to upload the image
        """
        sourceSite = sourceImagePage.site
        url = sourceImagePage.fileUrl().encode('utf-8')
        pywikibot.output(u"URL should be: %s" % url)
        # localize the text that should be printed on the image description page
        try:
            description = sourceImagePage.get()
            # try to translate license templates (module-level table keyed by
            # (source sitename, target sitename))
            if (sourceSite.sitename(), self.targetSite.sitename()) in licenseTemplates:
                for old, new in licenseTemplates[(sourceSite.sitename(),
                                                  self.targetSite.sitename())].items():
                    new = '{{%s}}' % new
                    old = re.compile('{{%s}}' % old)
                    # replace everywhere except inside comments/math/nowiki/pre
                    description = textlib.replaceExcept(description, old, new,
                                                        ['comment', 'math',
                                                         'nowiki', 'pre'])
            description = i18n.translate(self.targetSite, copy_message,
                                         fallback=True) % (sourceSite, description)
            description += '\n\n'
            description += sourceImagePage.getFileVersionHistoryTable()
            # add interwiki link back to the source page when both sites are
            # in the same family
            if sourceSite.family == self.targetSite.family:
                description += u'\r\n\r\n{0}'.format(sourceImagePage)
        except pywikibot.NoPage:
            description = ''
            print("Image does not exist or description page is empty.")
        except pywikibot.IsRedirectPage:
            description = ''
            print("Image description page is redirect.")
        else:
            bot = upload.UploadRobot(url=url, description=description,
                                     targetSite=self.targetSite,
                                     urlEncoding=sourceSite.encoding(),
                                     keepFilename=self.keep_name,
                                     verifyDescription=not self.keep_name,
                                     ignoreWarning=self.ignore_warning)
            # try to upload
            targetFilename = bot.run()
            if targetFilename and self.targetSite.family.name == 'commons' and \
               self.targetSite.code == 'commons':
                # upload to Commons was successful
                reason = i18n.translate(sourceSite, nowCommonsMessage, fallback=True)
                # try to delete the original image if we have a sysop account
                if sourceSite.family.name in config.sysopnames and \
                   sourceSite.lang in config.sysopnames[sourceSite.family.name]:
                    if sourceImagePage.delete(reason):
                        return
                if sourceSite.lang in nowCommonsTemplate and \
                   sourceSite.family.name in config.usernames and \
                   sourceSite.lang in config.usernames[sourceSite.family.name]:
                    # add the nowCommons template.
                    pywikibot.output(u'Adding nowCommons template to %s'
                                     % sourceImagePage.title())
                    sourceImagePage.put(sourceImagePage.get() + '\n\n' +
                                        nowCommonsTemplate[sourceSite.lang]
                                        % targetFilename,
                                        summary=nowCommonsMessage[sourceSite.lang])

    def showImageList(self, imagelist):
        # Print each candidate image with its index and description so the
        # operator can choose one in run().  Exits the whole program if an
        # image of the same name already exists on the target site.
        for i in range(len(imagelist)):
            image = imagelist[i]
            print("-" * 60)
            pywikibot.output(u"%s. Found image: %s"
                             % (i, image.title(asLink=True)))
            try:
                # Show the image description page's contents
                pywikibot.output(image.get())
                # look if page already exists with this name.
                # TODO: consider removing this: a different image of the same
                # name may exist on the target wiki, and the bot user may want
                # to upload anyway, using another name.
                try:
                    # Maybe the image is on the target site already
                    targetTitle = '%s:%s' % (self.targetSite.image_namespace(),
                                             image.title().split(':', 1)[1])
                    targetImage = pywikibot.Page(self.targetSite, targetTitle)
                    targetImage.get()
                    pywikibot.output(u"Image with this name is already on %s."
                                     % self.targetSite)
                    print("-" * 60)
                    pywikibot.output(targetImage.get())
                    sys.exit()
                except pywikibot.NoPage:
                    # That's the normal case
                    pass
                except pywikibot.IsRedirectPage:
                    pywikibot.output(
                        u"Description page on target wiki is redirect?!")
            except pywikibot.NoPage:
                break
            print("=" * 60)

    def run(self):
        # For each generated page, gather candidate images (from interwiki
        # links, the page itself if it is an image, or its image links) and
        # interactively transfer the ones the operator selects.
        for page in self.generator:
            if self.interwiki:
                imagelist = []
                for linkedPage in page.interwiki():
                    linkedPage = pywikibot.Page(linkedPage)
                    imagelist.extend(
                        linkedPage.imagelinks(
                            followRedirects=True))
            elif page.isImage():
                imagePage = pywikibot.FilePage(page.site, page.title())
                imagelist = [imagePage]
            else:
                imagelist = list(page.imagelinks(followRedirects=True))
            while len(imagelist) > 0:
                self.showImageList(imagelist)
                if len(imagelist) == 1:
                    # no need to query the user, only one possibility
                    todo = 0
                else:
                    pywikibot.output(
                        u"Give the number of the image to transfer.")
                    todo = pywikibot.input(u"To end uploading, press enter:")
                    if not todo:
                        break
                    todo = int(todo)
                if todo in range(len(imagelist)):
                    if imagelist[todo].fileIsShared():
                        pywikibot.output(
                            u'The image is already on Wikimedia Commons.')
                    else:
                        self.transferImage(imagelist[todo])
                    # remove the selected image from the list
                    imagelist = imagelist[:todo] + imagelist[todo + 1:]
                else:
                    pywikibot.output(u'No such image number.')
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    page_title = None
    interwiki = False
    keep_name = False
    target_lang = None
    target_family = None

    for arg in pywikibot.handle_args(args):
        if arg == '-interwiki':
            interwiki = True
        elif arg.startswith('-keepname'):
            keep_name = True
        elif arg.startswith('-tolang:'):
            target_lang = arg[8:]
        elif arg.startswith('-tofamily:'):
            target_family = arg[10:]
        elif not page_title:
            page_title = arg

    if not page_title:
        pywikibot.showHelp()
        return
    gen = iter([pywikibot.Page(pywikibot.Site(), page_title)])

    if target_lang is None and target_family is None:
        # No explicit destination: default to Wikimedia Commons.
        target_site = pywikibot.Site('commons', 'commons')
    else:
        # Fill in whichever half of (lang, family) was not given from the
        # default site.
        # NOTE(review): `.language` is passed unparenthesized, exactly as in
        # the original -- confirm against the pywikibot Site API in use.
        default_site = pywikibot.Site()
        if target_lang is None:
            target_lang = default_site.language
        if target_family is None:
            target_family = default_site.family
        target_site = pywikibot.Site(target_lang, target_family)

    bot = ImageTransferBot(gen, interwiki=interwiki, targetSite=target_site,
                           keep_name=keep_name)
    bot.run()
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the transfer bot.
    main()
| {
"content_hash": "94299e0fe781c1d4db6542fa07bf78a1",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 92,
"avg_line_length": 42.06478873239437,
"alnum_prop": 0.540614745864863,
"repo_name": "xZise/pywikibot-core",
"id": "c88d07e0a225f0cde618f69ddc2f04d930cb1229",
"size": "15788",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/imagetransfer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3640721"
}
],
"symlink_target": ""
} |
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Re-declares four marketplace identifier fields so their values are
    # validated against ^[a-zA-Z0-9_-]+$ (letters, digits, underscore and
    # minus only).

    dependencies = [
        ('marketplace', '0019_refactor_offering_referrals'),
    ]

    operations = [
        # Attribute.key: primary-key identifier, up to 255 chars.
        migrations.AlterField(
            model_name='attribute',
            name='key',
            field=models.CharField(
                max_length=255,
                primary_key=True,
                serialize=False,
                validators=[django.core.validators.RegexValidator('^[a-zA-Z0-9_-]+$')],
            ),
        ),
        # AttributeOption.key: plain identifier, up to 255 chars.
        migrations.AlterField(
            model_name='attributeoption',
            name='key',
            field=models.CharField(
                max_length=255,
                validators=[django.core.validators.RegexValidator('^[a-zA-Z0-9_-]+$')],
            ),
        ),
        # CategoryComponent.type: internal unit name, up to 50 chars.
        migrations.AlterField(
            model_name='categorycomponent',
            name='type',
            field=models.CharField(
                help_text='Unique internal name of the measured unit, for example floating_ip.',
                max_length=50,
                validators=[django.core.validators.RegexValidator('^[a-zA-Z0-9_-]+$')],
            ),
        ),
        # OfferingComponent.type: internal unit name, up to 50 chars.
        migrations.AlterField(
            model_name='offeringcomponent',
            name='type',
            field=models.CharField(
                help_text='Unique internal name of the measured unit, for example floating_ip.',
                max_length=50,
                validators=[django.core.validators.RegexValidator('^[a-zA-Z0-9_-]+$')],
            ),
        ),
    ]
| {
"content_hash": "f76c93a93a8297f5e0a6a28eb4824e62",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 96,
"avg_line_length": 33.375,
"alnum_prop": 0.5237203495630461,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "1c96a5e652854b8e20fc7f702a45e3a661c9ee4a",
"size": "1652",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/marketplace/migrations/0020_allow_minus_in_internal_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
} |
from django_messages_framework.api import get_messages
def messages(request):
    """
    Context processor that exposes a lazy 'messages' variable for templates.
    """
    context = {}
    context['messages'] = get_messages(request)
    return context
| {
"content_hash": "e76293ba1568005231c3485136f8450a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 23.875,
"alnum_prop": 0.6910994764397905,
"repo_name": "none-da/zeshare",
"id": "51cf3b7c98e8ae826a2242b18363ff8bd268b220",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_messages_framework/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "48335"
},
{
"name": "Python",
"bytes": "347229"
},
{
"name": "Shell",
"bytes": "321"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from neutron.agent.common import config
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
    import constants
# NOTE(review): the translation helper ``_`` used in the help strings below is
# not imported in this module -- presumably neutron installs it as a builtin
# via its i18n setup; confirm before reusing this file standalone.
# Defaults for the list-valued options: nothing is mapped or tunnelled
# unless explicitly configured.
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
# Options registered under the [OVS] config section: bridge names, patch
# ports and the tunnel endpoint address used by the OVS agent.
ovs_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use.")),
    cfg.StrOpt('tunnel_bridge', default='br-tun',
               help=_("Tunnel bridge to use.")),
    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
               help=_("Peer patch port in integration bridge for tunnel "
                      "bridge.")),
    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
               help=_("Peer patch port in tunnel bridge for integration "
                      "bridge.")),
    cfg.IPOpt('local_ip', version=4,
              help=_("Local IP address of tunnel endpoint.")),
    cfg.ListOpt('bridge_mappings',
                default=DEFAULT_BRIDGE_MAPPINGS,
                help=_("List of <physical_network>:<bridge>. "
                       "Deprecated for ofagent.")),
    cfg.BoolOpt('use_veth_interconnection', default=False,
                help=_("Use veths instead of patch ports to interconnect the "
                       "integration bridge to physical bridges.")),
    cfg.StrOpt('of_interface', default='ovs-ofctl', choices=['ovs-ofctl'],
               help=_("OpenFlow interface to use.")),
]
# Options registered under the [AGENT] config section: polling cadence,
# tunnelling, l2population and ARP handling behaviour of the agent itself.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('minimize_polling',
                default=True,
                help=_("Minimize polling by monitoring ovsdb for interface "
                       "changes.")),
    cfg.IntOpt('ovsdb_monitor_respawn_interval',
               default=constants.DEFAULT_OVSDBMON_RESPAWN,
               help=_("The number of seconds to wait before respawning the "
                      "ovsdb monitor after losing communication with it.")),
    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
                help=_("Network types supported by the agent "
                       "(gre and/or vxlan).")),
    cfg.IntOpt('vxlan_udp_port', default=p_const.VXLAN_UDP_PORT,
               help=_("The UDP port to use for VXLAN tunnels.")),
    cfg.IntOpt('veth_mtu',
               help=_("MTU size of veth interfaces")),
    cfg.BoolOpt('l2_population', default=False,
                help=_("Use ML2 l2population mechanism driver to learn "
                       "remote MAC and IPs and improve tunnel scalability.")),
    cfg.BoolOpt('arp_responder', default=False,
                help=_("Enable local ARP responder if it is supported. "
                       "Requires OVS 2.1 and ML2 l2population driver. "
                       "Allows the switch (when supporting an overlay) "
                       "to respond to an ARP request locally without "
                       "performing a costly ARP broadcast into the overlay.")),
    cfg.BoolOpt('prevent_arp_spoofing', default=True,
                help=_("Enable suppression of ARP responses that don't match "
                       "an IP address that belongs to the port from which "
                       "they originate. Note: This prevents the VMs attached "
                       "to this agent from spoofing, it doesn't protect them "
                       "from other devices which have the capability to spoof "
                       "(e.g. bare metal or VMs attached to agents without "
                       "this flag set to True). Spoofing rules will not be "
                       "added to any ports that have port security disabled. "
                       "For LinuxBridge, this requires ebtables. For OVS, it "
                       "requires a version that supports matching ARP "
                       "headers.")),
    cfg.BoolOpt('dont_fragment', default=True,
                help=_("Set or un-set the don't fragment (DF) bit on "
                       "outgoing IP packet carrying GRE/VXLAN tunnel.")),
    cfg.BoolOpt('enable_distributed_routing', default=False,
                help=_("Make the l2 agent run in DVR mode.")),
    cfg.IntOpt('quitting_rpc_timeout', default=10,
               help=_("Set new timeout in seconds for new rpc calls after "
                      "agent receives SIGTERM. If value is set to 0, rpc "
                      "timeout won't be changed"))
]
# Register both option groups on the global config object and pull in the
# shared agent-state reporting options.
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
| {
"content_hash": "0800a7a00e2695f3c98b8890d8b90327",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 51.380434782608695,
"alnum_prop": 0.5796488258938015,
"repo_name": "cisco-openstack/neutron",
"id": "98b6210f937d68fad6510aa6d136e04c0e56052c",
"size": "5333",
"binary": false,
"copies": "7",
"ref": "refs/heads/staging/libertyplus",
"path": "neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7422643"
},
{
"name": "Shell",
"bytes": "13215"
}
],
"symlink_target": ""
} |
"""Utilities for integrating with IPython
These functions should probably reside in Jupyter and IPython repositories,
after which we can import them instead of having our own definitions.
"""
import atexit
import os
try:
import queue
except ImportError:
# Python 2
import Queue as queue
from subprocess import Popen
import sys
from threading import Thread
from uuid import uuid4
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
from threading import Event
from IPython import get_ipython
from jupyter_client import BlockingKernelClient, write_connection_file
from jupyter_core.paths import jupyter_runtime_dir
OUTPUT_TIMEOUT = 10
def run_cell_remote(ip, kc, cell):
    """Run a cell on a KernelClient

    Any output from the cell is redisplayed in the local session: either
    republished on this kernel's iopub (when running inside a kernel) or
    written to the local stdout/stderr (when running in a plain session).
    """
    msg_id = kc.execute(cell)
    in_kernel = getattr(ip, "kernel", False)
    if in_kernel:
        pub_socket = ip.display_pub.pub_socket
        pub_session = ip.display_pub.session
        pub_parent = ip.display_pub.parent_header

    while True:
        try:
            reply = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
        except queue.Empty:
            raise TimeoutError("Timeout waiting for IPython output")

        if reply["parent_header"].get("msg_id") != msg_id:
            # output belonging to some other execution; skip it
            continue

        kind = reply["header"]["msg_type"]
        content = reply["content"]
        if kind == "status":
            # an 'idle' status marks the end of this execution's output
            if content["execution_state"] == "idle":
                break
        elif kind == "stream":
            getattr(sys, content["name"]).write(content["text"])
        elif kind in ("display_data", "execute_result", "error"):
            if in_kernel:
                pub_session.send(pub_socket, kind, content, parent=pub_parent)
            elif kind == "error":
                print("\n".join(content["traceback"]), file=sys.stderr)
            else:
                sys.stdout.write(content["data"].get("text/plain", ""))
        # any other message type is deliberately ignored
def register_worker_magic(connection_info, magic_name="worker"):
    """Register a %worker magic, given connection_info.

    Both a line and cell magic are registered,
    which run the given cell in a remote kernel.

    :param connection_info: dict of kernel connection info, including the
        session ``key``.
    :param magic_name: name under which the line/cell magic is registered.
    """
    ip = get_ipython()
    info = dict(connection_info)  # copy
    # The session key must not be passed to the client constructor; it is
    # applied to the session object directly below.  (Previously the original
    # dict -- still containing 'key' -- was passed and the popped copy was
    # unused; this mirrors what remote_magic does.)
    key = info.pop("key")
    kc = BlockingKernelClient(**info)
    kc.session.key = key
    kc.start_channels()

    def remote(line, cell=None):
        """Run the current cell on a remote IPython kernel"""
        if cell is None:
            # both line and cell magic
            cell = line
        run_cell_remote(ip, kc, cell)

    remote.client = kc  # preserve reference on kc, largely for mocking
    ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
    ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
def remote_magic(line, cell=None):
    """A magic for running code on a specified remote worker
    The connection_info dict of the worker will be looked up
    as the first positional arg to the magic.
    The rest of the line (or the entire cell for a %%cell magic)
    will be passed to the remote kernel.
    Usage:
    info = e.start_ipython(worker)[worker]
    %remote info print(worker.data)
    """
    # get connection info from IPython's user namespace
    ip = get_ipython()
    split_line = line.split(None, 1)
    info_name = split_line[0]
    if info_name not in ip.user_ns:
        raise NameError(info_name)
    connection_info = dict(ip.user_ns[info_name])
    if not cell:  # line magic, use the rest of the line
        if len(split_line) == 1:
            raise ValueError("I need some code to run!")
        cell = split_line[1]
    # turn info dict to hashable str for use as lookup key in _clients cache
    # NOTE: the cache key is built *before* 'key' is popped below, so it
    # deliberately includes the session key as well -- keep this ordering.
    key = ",".join(map(str, sorted(connection_info.items())))
    session_key = connection_info.pop("key")
    if key in remote_magic._clients:
        # reuse an already-connected client for this worker
        kc = remote_magic._clients[key]
    else:
        # first use of this worker: connect, authenticate the session with
        # the popped key, and wait until the kernel is ready
        kc = BlockingKernelClient(**connection_info)
        kc.session.key = session_key
        kc.start_channels()
        kc.wait_for_ready(timeout=10)
        remote_magic._clients[key] = kc
    # actually run the code
    run_cell_remote(ip, kc, cell)
# cache clients for re-use in remote magic
remote_magic._clients = {}
def register_remote_magic(magic_name="remote"):
    """Define the parameterized %remote magic

    See remote_magic above for details.
    """
    ip = get_ipython()
    if ip is None:
        # IPython isn't running; there is nothing to register the magic on.
        return
    for magic_kind in ("line", "cell"):
        ip.register_magic_function(remote_magic, magic_kind=magic_kind,
                                   magic_name=magic_name)
def connect_qtconsole(connection_info, name=None, extra_args=None):
    """Open a Jupyter QtConsole connected to the described kernel.

    Writes *connection_info* to a connection file in the Jupyter runtime
    directory, then spawns ``jupyter qtconsole --existing`` pointed at it.
    The connection file is removed again when this process exits.
    """
    if name is None:
        name = uuid4().hex
    path = os.path.join(jupyter_runtime_dir(), name + ".json")
    write_connection_file(path, **connection_info)

    args = ["jupyter", "qtconsole", "--existing", path]
    if extra_args:
        args = args + list(extra_args)
    Popen(args)

    @atexit.register
    def _cleanup_connection_file():
        """Cleanup our connection file when we exit."""
        try:
            os.remove(path)
        except OSError:
            pass
def start_ipython(ip=None, ns=None, log=None):
    """Start an IPython kernel in a thread
    Parameters
    ----------
    ip: str
        The IP address to listen on (likely the parent object's ip).
    ns: dict
        Any names that should be injected into the IPython namespace.
    log: logger instance
        Hook up IPython's logging to an existing logger instead of the default.
    Returns
    -------
    app:
        The IPKernelApp instance, whose kernel runs in a daemon thread.
    """
    from IPython import get_ipython
    if get_ipython() is not None:
        raise RuntimeError("Cannot start IPython, it's already running.")
    from zmq.eventloop.ioloop import ZMQIOLoop
    from ipykernel.kernelapp import IPKernelApp
    # save the global IOLoop instance
    # since IPython relies on it, but we are going to put it in a thread.
    save_inst = IOLoop.instance()
    IOLoop.clear_instance()
    # install() makes zmq_loop the new global IOLoop singleton, which the
    # kernel app will pick up when it initializes
    zmq_loop = ZMQIOLoop()
    zmq_loop.install()
    # start IPython, disabling its signal handlers that won't work due to running in a thread:
    app = IPKernelApp.instance(log=log)
    # Don't connect to the history database
    app.config.HistoryManager.hist_file = ":memory:"
    # listen on all interfaces, so remote clients can connect:
    if ip:
        app.ip = ip
    # disable some signal handling, logging
    def noop():
        return None
    app.init_signal = noop
    app.log_connection_info = noop
    # start IPython in a thread
    # initialization happens in the thread to avoid threading problems
    # with the sqlite history
    evt = Event()
    def _start():
        app.initialize([])
        app.kernel.pre_handler_hook = noop
        app.kernel.post_handler_hook = noop
        app.kernel.start()
        app.kernel.loop = IOLoop.instance()
        # save self in the IPython namespace as 'worker'
        # inject things into the IPython namespace
        if ns:
            app.kernel.shell.user_ns.update(ns)
        # signal readiness *before* blocking in the event loop
        evt.set()
        zmq_loop.start()
    zmq_loop_thread = Thread(target=_start)
    zmq_loop_thread.daemon = True
    zmq_loop_thread.start()
    # NOTE(review): this assert is stripped under `python -O`; consider
    # raising an explicit exception on timeout instead.
    assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
    # put the global IOLoop instance back:
    IOLoop.clear_instance()
    save_inst.install()
    return app
| {
"content_hash": "ec4fdbc2af590bc8f10aedbe81281b2d",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 94,
"avg_line_length": 30.96456692913386,
"alnum_prop": 0.6411951684678957,
"repo_name": "blaze/distributed",
"id": "1a99983378649adc8e084cfbc1e851ddf2f1fcc7",
"size": "7865",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distributed/_ipython_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
} |
"""Define API Loggers."""
import json
from google.protobuf.json_format import MessageToJson
class Logger(object):
    """Loggers represent named targets for log entries.
    See:
    https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs
    :type name: string
    :param name: the name of the logger
    :type client: :class:`gcloud.logging.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the logger (which requires a project).
    :type labels: dict or :class:`NoneType`
    :param labels: (optional) mapping of default labels for entries written
                   via this logger.
    """
    def __init__(self, name, client, labels=None):
        self.name = name
        self._client = client
        self.labels = labels
    @property
    def client(self):
        """Client bound to the logger."""
        return self._client
    @property
    def project(self):
        """Project bound to the logger."""
        return self._client.project
    @property
    def full_name(self):
        """Fully-qualified name used in logging APIs"""
        return 'projects/%s/logs/%s' % (self.project, self.name)
    @property
    def path(self):
        """URI path for use in logging APIs"""
        return '/%s' % (self.full_name,)
    def _require_client(self, client):
        """Check client or verify over-ride.
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        :rtype: :class:`gcloud.logging.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self._client
        return client
    def batch(self, client=None):
        """Return a batch to use as a context manager.
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        :rtype: :class:`Batch`
        :returns: A batch to use as a context manager.
        """
        client = self._require_client(client)
        return Batch(self, client)
    def _make_entry_resource(self, text=None, info=None, message=None,
                             labels=None, insert_id=None, severity=None,
                             http_request=None):
        """Return a log entry resource of the appropriate type.
        Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`.
        Only one of ``text``, ``info``, or ``message`` should be passed.
        :type text: string or :class:`NoneType`
        :param text: text payload
        :type info: dict or :class:`NoneType`
        :param info: struct payload
        :type message: Protobuf message or :class:`NoneType`
        :param message: protobuf payload
        :type labels: dict or :class:`NoneType`
        :param labels: labels passed in to calling method.
        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.
        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.
        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated with
                             the entry
        :rtype: dict
        :returns: JSON-serializable entry resource for the write API.
        """
        resource = {
            'logName': self.full_name,
            'resource': {'type': 'global'},
        }
        if text is not None:
            resource['textPayload'] = text
        if info is not None:
            resource['jsonPayload'] = info
        if message is not None:
            # round-trip protobuf messages through their JSON representation
            # so they can be sent over the REST API
            as_json_str = MessageToJson(message)
            as_json = json.loads(as_json_str)
            resource['protoPayload'] = as_json
        if labels is None:
            # fall back to the logger's default labels, if any
            labels = self.labels
        if labels is not None:
            resource['labels'] = labels
        if insert_id is not None:
            resource['insertId'] = insert_id
        if severity is not None:
            resource['severity'] = severity
        if http_request is not None:
            resource['httpRequest'] = http_request
        return resource
    def log_text(self, text, client=None, labels=None, insert_id=None,
                 severity=None, http_request=None):
        """API call: log a text message via a POST request
        See:
        https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
        :type text: string
        :param text: the log message.
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.
        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.
        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.
        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated with
                             the entry
        """
        client = self._require_client(client)
        entry_resource = self._make_entry_resource(
            text=text, labels=labels, insert_id=insert_id, severity=severity,
            http_request=http_request)
        client.logging_api.write_entries([entry_resource])
    def log_struct(self, info, client=None, labels=None, insert_id=None,
                   severity=None, http_request=None):
        """API call: log a structured message via a POST request
        See:
        https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
        :type info: dict
        :param info: the log entry information
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.
        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.
        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.
        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated with
                             the entry
        """
        client = self._require_client(client)
        entry_resource = self._make_entry_resource(
            info=info, labels=labels, insert_id=insert_id, severity=severity,
            http_request=http_request)
        client.logging_api.write_entries([entry_resource])
    def log_proto(self, message, client=None, labels=None, insert_id=None,
                  severity=None, http_request=None):
        """API call: log a protobuf message via a POST request
        See:
        https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write
        :type message: Protobuf message
        :param message: the message to be logged
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.
        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.
        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.
        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated with
                             the entry
        """
        client = self._require_client(client)
        entry_resource = self._make_entry_resource(
            message=message, labels=labels, insert_id=insert_id,
            severity=severity, http_request=http_request)
        client.logging_api.write_entries([entry_resource])
    def delete(self, client=None):
        """API call: delete all entries in a logger via a DELETE request
        See:
        https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs/delete
        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current logger.
        """
        client = self._require_client(client)
        client.logging_api.logger_delete(self.project, self.name)
    def list_entries(self, projects=None, filter_=None, order_by=None,
                     page_size=None, page_token=None):
        """Return a page of log entries.
        See:
        https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list
        :type projects: list of strings
        :param projects: project IDs to include. If not passed,
                         defaults to the project bound to the client.
        :type filter_: string
        :param filter_: a filter expression. See:
                        https://cloud.google.com/logging/docs/view/advanced_filters
        :type order_by: string
        :param order_by: One of :data:`gcloud.logging.ASCENDING` or
                         :data:`gcloud.logging.DESCENDING`.
        :type page_size: int
        :param page_size: maximum number of entries to return. If not passed,
                          defaults to a value set by the API.
        :type page_token: string
        :param page_token: opaque marker for the next "page" of entries. If not
                           passed, the API will return the first page of
                           entries.
        :rtype: tuple, (list, str)
        :returns: list of :class:`gcloud.logging.entry.TextEntry`, plus a
                  "next page token" string: if not None, indicates that
                  more entries can be retrieved with another call (pass that
                  value as ``page_token``).
        """
        # scope the listing to this logger by ANDing the logName restriction
        # with any user-supplied filter expression
        log_filter = 'logName=%s' % (self.full_name,)
        if filter_ is not None:
            filter_ = '%s AND %s' % (filter_, log_filter)
        else:
            filter_ = log_filter
        return self.client.list_entries(
            projects=projects, filter_=filter_, order_by=order_by,
            page_size=page_size, page_token=page_token)
class Batch(object):
    """Context manager: collect entries to log via a single API call.

    Helper returned by :meth:`Logger.batch`

    :type logger: :class:`gcloud.logging.logger.Logger`
    :param logger: the logger to which entries will be logged.

    :type client: :class:`gcloud.logging.client.Client`
    :param client: The client to use.
    """

    def __init__(self, logger, client):
        self.logger = logger
        self.entries = []
        self.client = client

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush buffered entries only when the block exits without error.
        if exc_type is None:
            self.commit()

    def log_text(self, text, labels=None, insert_id=None, severity=None,
                 http_request=None):
        """Queue a text entry for :meth:`commit`.

        :type text: string
        :param text: the text entry

        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.

        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.

        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.

        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated
                             with the entry.
        """
        record = ('text', text, labels, insert_id, severity, http_request)
        self.entries.append(record)

    def log_struct(self, info, labels=None, insert_id=None, severity=None,
                   http_request=None):
        """Queue a struct entry for :meth:`commit`.

        :type info: dict
        :param info: the struct entry

        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.

        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.

        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.

        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated
                             with the entry.
        """
        record = ('struct', info, labels, insert_id, severity, http_request)
        self.entries.append(record)

    def log_proto(self, message, labels=None, insert_id=None, severity=None,
                  http_request=None):
        """Queue a protobuf entry for :meth:`commit`.

        :type message: protobuf message
        :param message: the protobuf entry

        :type labels: dict or :class:`NoneType`
        :param labels: (optional) mapping of labels for the entry.

        :type insert_id: string or :class:`NoneType`
        :param insert_id: (optional) unique ID for log entry.

        :type severity: string or :class:`NoneType`
        :param severity: (optional) severity of event being logged.

        :type http_request: dict or :class:`NoneType`
        :param http_request: (optional) info about HTTP request associated
                             with the entry.
        """
        record = ('proto', message, labels, insert_id, severity, http_request)
        self.entries.append(record)

    def commit(self, client=None):
        """Send saved log entries as a single API call.

        :type client: :class:`gcloud.logging.client.Client` or ``NoneType``
        :param client: the client to use. If not passed, falls back to the
                       ``client`` stored on the current batch.
        """
        if client is None:
            client = self.client
        kwargs = {
            'logger_name': self.logger.path,
            'resource': {'type': 'global'},
        }
        if self.logger.labels is not None:
            kwargs['labels'] = self.logger.labels

        entries = []
        for entry_type, payload, labels, iid, severity, http_req in self.entries:
            if entry_type == 'text':
                info = {'textPayload': payload}
            elif entry_type == 'struct':
                info = {'jsonPayload': payload}
            elif entry_type == 'proto':
                # round-trip the protobuf through JSON for the REST API
                info = {'protoPayload': json.loads(MessageToJson(payload))}
            else:
                raise ValueError('Unknown entry type: %s' % (entry_type,))
            for key, value in (('labels', labels),
                               ('insertId', iid),
                               ('severity', severity),
                               ('httpRequest', http_req)):
                if value is not None:
                    info[key] = value
            entries.append(info)

        client.logging_api.write_entries(entries, **kwargs)
        del self.entries[:]
| {
"content_hash": "8455ddfc759a3c9f23fa311152c7a637",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 95,
"avg_line_length": 37.053613053613056,
"alnum_prop": 0.5910291897332662,
"repo_name": "quoclieu/codebrew17-starving",
"id": "ad2d4b70ca77105083ad059921b62fefac48b5e9",
"size": "16493",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/gcloud/logging/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "878455"
},
{
"name": "HTML",
"bytes": "510823"
},
{
"name": "JavaScript",
"bytes": "1522698"
},
{
"name": "Python",
"bytes": "5473"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from subprocess import *
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte
    string.

    If the exit code was non-zero it raises a CalledProcessError. The
    CalledProcessError object will have the return code in the
    returncode attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor. Example::

        >>> check_output(["ls", "-l", "/dev/null"])
        'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.::

        >>> check_output(["/bin/sh", "-c",
        ...               "ls -l non_existent_file ; exit 0"],
        ...              stderr=STDOUT)
        'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        # Attach the captured output so callers can inspect it via the
        # exception's ``output`` attribute, as the docstring promises and as
        # CPython 2.7's implementation does.  (Requires a CalledProcessError
        # that accepts ``output`` -- Python 2.7+.)
        raise CalledProcessError(retcode, cmd, output=output)
    return output
| {
"content_hash": "995300512f7ec69b0df43d6db0e7d248",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 34.36842105263158,
"alnum_prop": 0.6408882082695253,
"repo_name": "mdboom/astropy-helpers",
"id": "a14df41bb303fa1a8c06977e718144da3da0552b",
"size": "1306",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "astropy_helpers/compat/_subprocess_py2/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""The JIP Pipeline module contains the classs and functions
used to create pipeline graphs
"""
import collections
import os
from jip.options import Option
from jip.tools import Tool
from jip.profiles import Profile
from jip.logger import getLogger
from jip.templates import render_template
import jip.tools
log = getLogger('jip.pipelines')
class Job(Profile):
    """Container class that wraps job meta-data.
    The pipeline job extends the general :class:`jip.profiles.Profile`, and
    extends it in a way that you can create new pipeline nodes from the job.
    Those nodes will then hold a reference to the profile and all customization
    on the profile will be applied to the node.
    """
    def __init__(self, pipeline=None, **kwargs):
        Profile.__init__(self, **kwargs)
        # the Pipeline this job profile is attached to (may be None)
        self._pipeline = pipeline
        # the pipeline Node created from this job, set once the job is used
        self._node = None
        # the name the job had when it was first added to the pipeline; kept
        # so later renames in the name setter below don't lose it
        self._in_pipeline_name = None
    @classmethod
    def from_profile(cls, profile, pipeline):
        """Create a pipeline Job from a plain Profile, copying its state."""
        job = cls(pipeline=pipeline, **(profile.__dict__))
        return job
    def __getstate__(self):
        # drop the pipeline and node references before pickling so the whole
        # pipeline graph is not dragged along; Pipeline.__setstate__ restores
        # them when the pipeline itself is unpickled
        data = self.__dict__.copy()
        data['_pipeline'] = None
        data['_node'] = None
        return data
    # override the name setter in order to delegate switching names to
    # the jobs node
    @Profile.name.setter
    def name(self, name):
        self._name = name
        if self._in_pipeline_name is None:
            # first assignment: remember the original in-pipeline name
            self._in_pipeline_name = name
        else:
            # later renames keep the original in-pipeline name for the node
            name = self._in_pipeline_name
        if self._node is not None and self._pipeline is not None:
            self._pipeline._apply_node_name(self._node, name)
    def _render_job_name(self, job):
        # render the (possibly templated) job name using the tool's options
        # as the template context
        ctx = {}
        for o in job.tool.options:
            ctx[o.name] = o
        if self._in_pipeline_name:
            name = self._in_pipeline_name
        else:
            name = self._node._name if self._node else self.name
        if not name:
            # fall back to the tool's own name
            name = job._tool.name
        name = render_template(
            "%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
        )
        # set name
        if self._pipeline and self._node:
            self._pipeline._apply_node_name(self._node, name)
            return self._node.name
        return name
    def _render_name(self):
        # like _render_job_name, but for this job's own node; falls back to
        # the plain name when the job is not attached to a pipeline node
        if not self._pipeline or not self._node:
            return self.name
        ctx = {}
        for o in self._node._tool.options:
            ctx[o.name] = o
        if self._in_pipeline_name:
            name = self._in_pipeline_name
        else:
            name = self._node._name
        name = render_template(
            "%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
        )
        return name
    def __call__(self, *args, **kwargs):
        # cloning preserves the pipeline reference and the original
        # in-pipeline name on the copy
        clone = Profile.__call__(self, *args, **kwargs)
        clone._pipeline = self._pipeline
        clone._in_pipeline_name = self._in_pipeline_name
        if clone._in_pipeline_name is None:
            clone._in_pipeline_name = clone.name
        return clone
    def run(self, *args, **kwargs):
        """Delegates to :py:meth:`Pipeline.run` and runs the specified tool
        using this job environment configuration
        :param args: args passed on to the pipeline ``run`` method
        :param kwargs: kwargs passed on to the pipeline ``run`` method
        :returns: the newly created node
        :rtype: :class:`Node`
        """
        if len(args) > 1:
            raise ValueError("You can only pass one tool to a job run !")
        node = args[0]
        return self._pipeline.run(node, _job=self, **kwargs)
    def bash(self, command, **kwargs):
        """Create a new ``bash`` job.
        :param command: the bash command
        :param kwargs: keyword arguments passed on the bash job
        :returns: the newly created node
        :rtype: :class:`Node`
        """
        return self.run('bash', cmd=command, **kwargs)
class Pipeline(object):
"""A pipeline is a directed acyclic graph of Nodes and edges"""
def __init__(self, cwd=None):
self._nodes = {}
self._edges = set([])
self._job = Job(self, working_dir=cwd)
self._current_job = self._job
self._component_index = {}
self._cleanup_nodes = []
self._name = None
self.excludes = []
self._node_index = 0 # unique steadily increasing number
self._utils = None
self._cwd = self._job.working_dir
self._pipeline_name = None
def __getstate__(self):
data = {}
data['_job'] = self._job
data['_cwd'] = self._cwd
data['_current_job'] = self._current_job
data['_name'] = self._name
data['_node_index'] = self._node_index
data['_nodes'] = list(self._nodes.values())
return data
def __setstate__(self, data):
## update dict
self.__dict__['_cwd'] = data['_cwd']
self.__dict__['_edges'] = set([])
self.__dict__['_component_index'] = {}
self.__dict__['_cleanup_nodes'] = []
self.__dict__['excludes'] = []
self.__dict__['_utils'] = None
self.__dict__['_job'] = data['_job']
self.__dict__['_current_job'] = data['_current_job']
self.__dict__['_name'] = data['_name']
self.__dict__['_node_index'] = data['_node_index']
self.__dict__['_job']._pipeline = self
self.__dict__['_current_job']._pipeline = self
###############################################
# update nodes
###############################################
nodes = {}
for node in data['_nodes']:
node._graph = self
node._job._pipeline = self
node._job._node = node
tool = node._tool
nodes[tool] = node
for e in node._edges:
self._edges.add(e)
self.__dict__['_nodes'] = nodes
def __len__(self):
return len(self._nodes)
def __exit__(self, *args, **kwargs):
pass
def __enter__(self):
return self
@property
def utils(self):
if self._utils is None:
self._utils = jip.tools.PythonBlockUtils(None, locals())
self._utils._pipeline = self
return self._utils
@property
def edges(self):
"""Access all edges in the current pipeline graph as a list
of :class:`Edge`
:getter: get a list of all edges
:type: list of :class:`Edge`
"""
return list(self._edges)
def pipeline_name(self, name):
""" Set the user defined name of the pipeline
:param name: the user defined name of the pipeline
:type name: string
"""
if name is None:
return
self._pipeline_name = name
for n in self.nodes():
n._pipeline_name = name
def name(self, name):
"""Set the name of the pipeline and ensures that all
nodes in the pipeline reference the pipeline name.
:param name: the name of the pipeline
:type name: string
"""
if name is None:
return
for n in self.nodes():
n._pipeline = name
def job(self, *args, **kwargs):
"""Create a new job profile.
The job profile can be used to customize the execution behaviour
of a job. Calling this method will only create a new job profile,
but it will not be applied to any node in the graph. You can however
create nodes *from* the job profile, using :py:meth:`Job.run` or
:py:meth:`Job.bash`. These nodes will then get a copy of the job
profile and the profiles properties will be applied before job
execution.
:param args: args passed to :class:`Job`
:param kwargs: kwargs passed to :class:`Job`
:returns: new job profile
:rtype: :class:`Job`
"""
return self._job(*args, **kwargs)
    def run(self, _tool_name, _job=None, **kwargs):
        """Find the tool specified by name and add it as a node to the pipeline
        graph.

        All additional keyword arguments are passed as option configuration to
        the tool instance, allowing you to configure your tool when you create
        it.

        Note that the tools :py:meth:`~jip.tools.Tool.validate` method is
        called here silently. Exceptions are caught and logged. This is
        necessary to allow tools to initialize themselves when they are added
        to a pipeline.

        :param _tool_name: a :class:`~jip.tools.Tool` instance or a tool name
        :param _job: optional job profile applied to the new node
        :param kwargs: all keyword arguments are passed to the tool as option
                       configurations
        :returns: the newly added node
        :rtype: :class:`Node`
        :raises jip.tool.ToolNotFoundException: if the specified tool could not
                                                be found
        """
        # resolve tool names through the registry; Tool instances are
        # used as-is
        if not isinstance(_tool_name, Tool):
            from jip import find
            tool = find(_tool_name)
        else:
            tool = _tool_name
        node = self.add(tool, _job=_job)
        # add options if specified in kwargs
        def _add_opts(option_type):
            # Consumes kwargs[option_type] (a dict of option name ->
            # value or config dict), registers each entry on the node's
            # option set and removes the key from the outer kwargs so
            # the remaining kwargs can be applied as plain values below.
            def _add(opts, name, kwargs=None):
                kwargs = kwargs if kwargs else {}
                if option_type == "_inputs":
                    opts.add_input(name, **kwargs)
                elif option_type == '_outputs':
                    opts.add_output(name, **kwargs)
                else:
                    opts.add_option(name, **kwargs)
            if option_type in kwargs:
                for name, value in kwargs[option_type].iteritems():
                    opts = node._tool.options
                    if isinstance(value, dict):
                        # get and remove any value set here,
                        # otherwise this will influence the nargs
                        # setting of the new option. We set the
                        # value later anyways. We remove it from the
                        # dict only if nargs is set. That means that
                        # nargs will dominate
                        v = None
                        if "value" in value:
                            v = value["value"]
                            if "nargs" in value:
                                del value["value"]
                        _add(opts, name, value)
                        if v is not None:
                            node.set(name, v, allow_stream=False)
                    else:
                        _add(opts, name)
                        node.set(name, value, allow_stream=False)
                del kwargs[option_type]
        _add_opts("_inputs")
        _add_opts("_outputs")
        _add_opts("_options")
        # everything left in kwargs is a plain option value
        for k, v in kwargs.iteritems():
            node.set(k, v, allow_stream=False)
        return node
def bash(self, command, **kwargs):
"""Create a *bash* job that executes a bash command.
This us a fast way to build pipelines that execute shell commands. The
functions wraps the given command string in the *bash tool* that
is defined with ``input``, ``output``, and ``outfile``. Input and
output default to stdin and stdout.
:param command: the bash command to execute
:type command: string
:param kwargs: arguments passed into the context used to render the
bash command. ``input``, ``output``, and ``outfile`` are
passed as options to the *bash* tool that is used to
run the command
:returns: a new pipeline node that represents the bash job
:rtype: :class:`jip.pipelines.Node`
"""
return self.utils.bash(command, **kwargs)
    def add(self, tool, _job=None):
        """Add a tool or a node to the pipeline. If the given value
        is not a node, it is wrapped in a new node instance and then added
        to the pipeline. The newly created node is returned.

        Note that the nodes uniquely map to tool instances. You can not
        add the same instance twice to the pipeline. Instead, no new
        node will be added and the already existing node will be returned.

        :param tool: the tool or node
        :type tool: :class:`jip.tools.Tool` or :class:`Node`
        :param _job: optional job profile used when a new node is created
        :returns: the new node
        :rtype: :class:`Node`
        """
        if isinstance(tool, Node):
            # re-adding an existing node (e.g. from an expanded
            # sub-pipeline): register it under its tool and re-attach
            # it to this graph
            n = tool
            self._nodes[n._tool] = n
            n._pipeline = self._name if self._name else n._pipeline
            n._graph = self
            n._job._pipeline = self
            n._node_index = self._node_index
            self._node_index += 1
            # job names take precedence over tool names
            name = n._tool.name
            if n._job.name:
                name = n._job.name
            log.debug("Add node | added %s", name)
            self._apply_node_name(n, name)
            return n
        elif not tool in self._nodes:
            n = Node(tool, self)
            if not _job and tool._job:
                # load profile from tool
                _job = Job.from_profile(tool._job, self)
            # set the job
            job = _job() if _job else self._current_job()
            n._tool._job = job
            n._pipeline = self._name
            n._pipeline_name = self._pipeline_name
            n._job = job
            job._node = n
            self._nodes[tool] = n
            # initialize the tool name using the tools' name
            # initialize the node index
            n._node_index = self._node_index
            self._node_index += 1
            name = tool.name if not job.name else job.name
            log.debug("Add node | added %s", name)
            self._apply_node_name(n, name)
            # set pipeline profile
            n._pipeline_profile = _job() if _job else self._current_job()
        # tool already registered: fall through and return the
        # existing node
        return self._nodes[tool]
def _apply_node_name(self, node, name):
"""Assign the given name to the node and make sure the
name is unique within the current set of nodes.
If there is another node with the same name, the nodes index will
be set accordingly.
:param node: the node
:param name: the new name
"""
name = name if name else "tool"
old_name = node._name
# set the new name and get all the nodes
# with the same name
node._name = name
node._index = -1
nodes_with_same_name = [i for i in self.nodes() if i._name == name]
if len(nodes_with_same_name) > 1:
# sort them by their index so we get the nodes in
# the same order they were added
nodes_with_same_name = sorted(nodes_with_same_name,
key=lambda x: x._node_index)
# there is more than one node with the same name.
# make sure the _index is set
for i, nn in enumerate(nodes_with_same_name):
nn._index = i
if old_name and old_name != name:
# node was renamed. Update all the "old" nodes and eventually
# reset their _index
old_nodes = [i for i in self.nodes() if i._name == old_name]
if len(old_nodes) == 1:
# single node left, reset the index
old_nodes[0]._index = -1
elif len(old_nodes) > 1:
# update the nodes _index, same strategy as above
old_nodes = sorted(old_nodes, key=lambda x: x._node_index)
for i, nn in enumerate(old_nodes):
nn._index = i
def get(self, name):
"""Find a node by tool or node name including its node index.
We search here through the node, searching for a node whose name equals
the given name. The full name consists if of the tool name and the node
index if there is are more nodes with the same name. A node index is
typically assigned and used after pipeline expansion, which means you
might have to append the correct index to the node you are looking for.
This is necessary because multi-plexing of the pipeline can not always
guarantee unique nodes names. The nodes might get duplicated based on
the input of the pipeline. Therefor a unique node index is appended to
the node name. You can expect the pipeline nodes and their names using
the :meth:`nodes` method and iterate it. Printing, or calling ``str``
will resolve the current node name.
If you assign a job name to the node, this will overwrite the node
name and will be used instead, but note that the same indexing rules
apply and if graph contains more than one node with the same name, the
node index will be appended to the node/job name.
If the index is appended, the node name always has the form
"<name>.<index>".
For example, without any special assignment, the node name defaults to
the name of the tool. If there is only one node with that name,
no modifications are applied and the node index is ignored::
>>> p = Pipeline()
>>> p.run('bash', cmd='ls')
bash
>>> p.expand()
False
>>> assert p.get("bash") is not None
:param name: node name
:returns: node name
:raises LookupError: if no such node exists
"""
for k, v in self._nodes.iteritems():
if v.name == name:
return v
raise LookupError("Node with name %s not found" % name)
def remove(self, tool, remove_links=True):
"""Remove the given tool or node from the pipeline graph.
:param tool: tool or node
"""
tool, _ = self.__resolve_node_tool(tool)
node = self._nodes[tool]
node_edges = list(node._edges)
# remove edges
for e in node_edges:
if remove_links:
e.remove_links()
if e in self._edges:
self._edges.remove(e)
if e in e._source._edges:
e._source._edges.remove(e)
if e in e._target._edges:
e._target._edges.remove(e)
# remove the node
del self._nodes[tool]
# update names
name = node._name
# find nodes with the same name
nodes = [n for n in self.nodes() if n._name == name]
if len(nodes) > 0:
# reapply the name to the first one, that should rename
# the other as well
self._apply_node_name(nodes[0], name)
def nodes(self):
"""Generator that yields the nodes of this pipeline
:returns nodes: the nodes of this pipeline
:rtype: list of Node
"""
for node in self._nodes.itervalues():
yield node
def __resolve_node_tool(self, source, target=None):
return (source if not isinstance(source, Node) else source._tool,
None if not target else
target if not isinstance(target, Node) else target._tool)
def add_edge(self, source, target):
"""Adds an edge between the source and the target if no
such edge exists. Otherwise the existing edge will be returned.
:param source: the source node or tool instance
:type source: :class:`Node` or :class:`~jip.tools.Tool`
:param target: the target node or tool instance
:type target: :class:`Node` or :class:`~jip.tools.Tool`
:returns: the edge between `source` and `target`
:raises LookupError: if the source or target node could not be found
"""
source, target = self.__resolve_node_tool(source, target)
source_node = None
try:
source_node = self._nodes[source]
except LookupError:
return None
target_node = self._nodes[target]
edge = Edge(source_node, target_node)
if edge in self._edges:
for known in self._edges:
if edge == known:
return known
log.debug("Add edge: %s->%s", source_node, target_node)
self._edges.add(edge)
if not edge in source_node._edges:
source_node._edges.append(edge)
if not edge in target_node._edges:
target_node._edges.append(edge)
return edge
def get_edge(self, source, target):
"""Returns the edge between `source` and `target` or raises a
``KeyError`` if no such edge exists.
:param source: the source node or tool instance
:type source: :class:`Node` or :class:`~jip.tools.Tool`
:param target: the target node or tool instance
:type target: :class:`Node` or :class:`~jip.tools.Tool`
:returns: the edge between `source` and `target`
:raises LookupError: if the source or target node could not be found
:raises KeyError: if no edge between source and target exists
"""
source, target = self.__resolve_node_tool(source, target)
source_node = self._nodes[source]
target_node = self._nodes[target]
edge = Edge(source_node, target_node)
if edge in self._edges:
for known in self._edges:
if edge == known:
return known
raise KeyError("No edge %s->%s found in graph!" % source, target)
def topological_order(self):
"""Generator function that yields the nodes in the graph in
topological order.
Please note that this function does **not** cache the order and
recalculates it on each call. If you know the pipeline graph will
not change any more and you have to iterate the nodes in order
more than once, you might want to cache the results::
>>> pipeline = Pipeline()
>>> ordered = list(pipeline.topological_order())
:returns: yields nodes in topological order
"""
count = {}
children = {}
for node in self.nodes():
count[node] = 0
for node in self.nodes():
_children = sorted(node.children(),
key=lambda j: j._node_index, reverse=True)
children[node] = _children
for successor in _children:
count[successor] += 1
ready = [node for node in self.nodes() if count[node] == 0]
ready = sorted(ready, key=lambda j: j._node_index, reverse=True)
while ready:
node = ready.pop(-1)
yield node
for successor in children[node]:
count[successor] -= 1
if count[successor] == 0:
ready.append(successor)
    def groups(self):
        """Sorts the nodes in topological order and then groups nodes
        together if they have a dependency and at least one of the dependency
        options is set for streaming.

        Yields lists of nodes. Each list represents a group of tools that
        need to be executed in parallel to be able to pipe all streams.
        """
        resolved = set([])  # nodes already assigned to some group
        group = []

        def resolve_streaming_dependencies(node):
            # transitively pull every node connected through a
            # streaming link into the current group
            for e in node.outgoing():
                if e.has_streaming_link():
                    target = e._target
                    resolved.add(target)
                    group.append(target)
                    resolve_streaming_dependencies(target)
                    # also pull in other streaming sources feeding the
                    # same target through different edges
                    for in_edge in target.incoming():
                        if in_edge == e:
                            continue
                        source = in_edge._source
                        if source == node or source in resolved:
                            continue
                        if in_edge.has_streaming_link():
                            resolved.add(source)
                            group.append(source)

        for node in self.topological_order():
            if node in resolved:
                continue
            group.append(node)
            resolved.add(node)
            resolve_streaming_dependencies(node)
            log.debug("Expand | Creating job group: %s", group)
            yield group
            group = []
def exclude(self, excludes):
"""Takes a list of node names and removes all nodes and their
successors from the graph.
:param excludes: list of node names
:type excludes: list of string
"""
if not excludes:
return
if not isinstance(excludes, (list, tuple)):
excludes = [excludes]
excludes = set(excludes)
# index the nodes by name
names2nodes = {}
for node in self.nodes():
if node._job.name is not None:
names2nodes[node._job.name] = node
def _recursive_remove(node, force=True):
parents = list(node.parents())
if force or len(parents) <= 1:
children = list(node.children())
map(lambda n: _recursive_remove(n, False),
children)
try:
log.info("Excluding node %s", node)
self.remove(node)
# check the children again, they might have becom invalid
for child in [c for c in children
if c._tool in self._nodes]:
try:
child._tool.validate()
except:
log.info("Forcing exclude of %s, "
"node became invalid",
child)
_recursive_remove(child)
except KeyError:
## ignore errors where the node was already removed
pass
for name in excludes:
if not name in names2nodes:
log.warn("Node marked for exclusing not found: %s", name)
else:
if isinstance(name, basestring) and not name in names2nodes:
node = names2nodes[name]
else:
node = name
_recursive_remove(names2nodes[name])
self._update_cleanup_nodes()
    def skip(self, excludes):
        """Takes a list of node names or node instances and removes the node
        and tries to connect parent and children of the node.

        :param excludes: list of node names or node instances
        :type excludes: list of string or :class:`Node`
        """
        if not excludes:
            return
        if not isinstance(excludes, (list, tuple)):
            excludes = [excludes]
        excludes = set(excludes)
        # index the nodes by name
        names2nodes = {}
        for node in self.nodes():
            if node._job.name is not None:
                names2nodes[node._job.name] = node
        for name in excludes:
            if isinstance(name, basestring) and not name in names2nodes:
                log.warn("Node marked for skip not found: %s", name)
            else:
                # accept both plain names and Node instances
                if isinstance(name, basestring):
                    node = names2nodes[name]
                else:
                    node = name
                parents = list(node.parents())
                children = list(node.children())
                if len(parents) > 0 and len(children) > 0:
                    # propagate all output files of the skip node
                    # back to the parent if the parent does not already
                    # write a file
                    out_files = list(node._tool.get_output_files())
                    if len(out_files) > 0:
                        for p in parents:
                            p_files = list(p._tool.get_output_files())
                            if len(p_files) == 0:
                                out_opt = p._tool.options.get_default_output()
                                p.set(out_opt.name, out_files)
                    # bridge the skipped node: connect every incoming
                    # source option to every outgoing target option,
                    # preserving the stream flag of the incoming link
                    for outedge in node.outgoing():
                        for link in outedge._links:
                            target_option = link[1]
                            for inedge in node.incoming():
                                for link in inedge._links:
                                    source_option, stream = link[0], link[2]
                                    outedge._target.set(target_option.name,
                                                        source_option,
                                                        append=True,
                                                        allow_stream=stream)
                elif len(parents) == 0:
                    # no parent but at least one child: pass the
                    # skipped node's default input down to the children
                    in_opt = node._tool.options.get_default_input()
                    if in_opt:
                        for child in children:
                            child._tool.options.get_default_input().set(
                                in_opt.raw()
                            )
                elif len(children) == 0:
                    # no children: pass the skipped node's default
                    # output up to the parents
                    opt = node._tool.options.get_default_output()
                    if opt:
                        for parent in parents:
                            parent._tool.options.get_default_output().set(
                                opt.raw()
                            )
                self.remove(node)
        self._update_cleanup_nodes()
def context(self, context):
"""Update the global context of the pipeline and add the values
from the given context
:param context: the context
"""
if context:
self.utils._update_global_env(context)
    def expand(self, context=None, validate=True, _find_dup=True,
               _check_fanout=True):
        """This modifies the current graph state and applies fan_out
        operations on nodes with singleton options that are populated with
        list.

        An exception is raised in case a node has more than one option that
        should be expanded and the number of configured elements is not the
        same.

        You can specify a ``context`` that will be used additionally to resolve
        template variables and references in node options. This allows you
        to give the template system access to your local environment. For
        example::

            >>> p = Pipeline()
            >>> a = "myinput.txt"
            >>> p.bash('wc -l ${a}')
            bash
            >>> p.expand(locals())
            False
            >>> assert p.get("bash").cmd.get() == 'wc -l myinput.txt'

        :param validate: disable validation by setting this to false
        :param context: specify a local context that is taken into account
                        in template and option rendering
        :param _find_dup: internal flag; when False, link propagation,
                          duplicate merging and final validation are
                          skipped (used while expanding sub-pipelines)
        :param _check_fanout: internal flag; when False, fanout is skipped
        :returns: True if at least one node was fanned out
        """
        log.info("Expand | Expand Graph with %d nodes", len(self))
        if context is not None:
            self.context(context)
        # add dependency edges between groups
        # when a node in a group has an incoming edge from a parent
        # outside of the group, add the edge also to any predecessor
        # of the node within the group
        self._expand_add_group_dependencies()
        # check nodes for fanout
        fanout_done = self._expand_fanout(_check_fanout)
        # for all temp jobs, find a final non-temp target
        # if we have targets, create a cleanup job, add
        # all the temp job's output files and
        # make it dependent on the temp nodes targets
        self._expand_add_cleanup_jobs()
        # iterate again to expand on pipeline of pipelines
        self._expand_sub_pipelines(validate=validate)
        if _find_dup:
            # update node option values from links
            # TODO add index to links and use it here
            # render all nodes
            log.info("Expand | Render node context for %d nodes", len(self))
            # setup nodes
            #for n in self.nodes():
                #n._tool.setup()
            # render values
            #_render_nodes(self, list(self.nodes()))
            updated = set([])
            cwd = self._cwd
            if cwd is None:
                cwd = os.getcwd()
            for node in self.topological_order():
                # ensure a working directory is set
                if node._job.working_dir is None:
                    node._job.working_dir = cwd
                node._tool.options.make_absolute(node._job.working_dir)
                # propagate linked source values into their targets;
                # each target is reset once before values accumulate
                for link in [l for e in node.outgoing() for l in e._links]:
                    source = link[0]
                    target = link[1]
                    if not target in updated:
                        target._value = []
                        updated.add(target)
                    target._value.extend(source.value)
            # detect duplicates and try to merge them
            self._expand_merge_duplicates()
        # apply names from global context
        self._expand_name_jobs_by_context()
        # applied and perform the final validation on all nodes
        if _find_dup:
            log.info("Expand | Validating nodes")
            for node in self.nodes():
                #node._tool.options.make_absolute(node._job.working_dir)
                self._validate_node(node, silent=not validate)
                #self._apply_node_name(node, node._name)
        ##########################################################
        # transitive reduction of dependencies
        #
        # Currently quite inefficient implementation of transitive
        # reduction to remove edges that are redundant in the
        # graph.
        ##########################################################
        #def transitive_reduction(vertex, child, done):
            #if child in done:
                #return
            #for outedge in child.outgoing():
                #vertex._remove_edge_to(outedge._target)
                #transitive_reduction(vertex, outedge._target, done)
            #done.add(child)
        #for j in self.nodes():
            #done = set([])
            #for child in j.outgoing():
                #transitive_reduction(j, child._target, done)
        log.info("Expand | Expansion finished. Nodes: %d", len(self))
        return fanout_done
def _expand_add_group_dependencies(self):
"""Add dependency edges between groups
when a node in a group has an incoming edge from a parent
outside of the group, add the edge also to any predecessor
of the node within the group
"""
for group in self.groups():
gs = set(group)
first = group[0]
for node in group:
for parent in node.parents():
if parent not in gs:
## add an edge to the first of the group
log.debug("Expand | add group dependency %s->%s",
parent, first)
self.add_edge(parent, first)
def _expand_fanout(self, fanout):
"""Check all nodes in topological order if they need to
be fanned out and perform the fanout if necessary.
"""
if not fanout:
log.info("Expand | Fanout disabled, updating options")
return False
log.info("Expand | Checking for fanout in %d nodes", len(self))
fanout_done = False
for node in self.topological_order():
fanout_options = self._get_fanout_options(node)
if not fanout_options:
log.debug("Expand | No fanout options found for %s", node)
continue
# check that all fanout options have the same length
self._check_fanout_options(node, fanout_options)
# no exception was raised so we can actually do the
# fanout on the giben node
self._fan_out(node, fanout_options)
fanout_done = True
return fanout_done
    def _expand_add_cleanup_jobs(self):
        """For all temp jobs, find a final non-temp target.

        If we have targets, create a cleanup job, add all the temp
        job's output files and make it dependent on the temp nodes
        targets.
        """
        log.info("Expand | Checking for temporary jobs")
        temp_nodes = set([])
        targets = set([])
        temp_outputs = set([])
        for node in self.nodes():
            if node._job.temp:
                temp_nodes.add(node)
        if temp_nodes:
            log.info("Expand | Check temporary outputs for %d job(s)",
                     len(temp_nodes))
            for temp_node in temp_nodes:
                # collect all non-stream output options of the temp node
                for opt in temp_node._tool.options.get_by_type(
                        jip.options.TYPE_OUTPUT):
                    if not opt.is_stream():
                        temp_outputs.add(opt)
                # non-temp children are the targets the cleanup job
                # must wait for
                for child in temp_node.children():
                    if not child._job.temp:
                        targets.add(child)
        log.info("Expand | Found %d temporary outputs and %d targets",
                 len(temp_outputs), len(targets))
        if len(targets) > 0:
            # create a single cleanup node that removes all temporary
            # outputs once all targets have finished
            cleanup_node = self.run(
                'cleanup',
                files=list(temp_outputs)
            )
            cleanup_node.job.threads = 1
            cleanup_node.job.temp = True
            cleanup_node.job.name = "cleanup"
            cleanup_node._name = "cleanup"
            cleanup_node.files.dependency = True
            #for target in (list(targets) + list(temp_nodes)):
            for target in list(targets):
                if not cleanup_node._pipeline and target._pipeline:
                    cleanup_node._pipeline = target._pipeline
                cleanup_node.depends_on(target)
            self._cleanup_nodes.append(cleanup_node)
    def _expand_sub_pipelines(self, validate=True):
        """Search for sub-pipeline nodes and expand them.

        A node whose tool yields a pipeline is replaced by that
        pipeline's nodes: the sub-pipeline is expanded recursively, its
        nodes and edges are merged into this graph, the original node's
        incoming/outgoing edges are re-attached to the sub-pipeline's
        head/tail nodes, option links are re-established and finally
        the original node is removed.

        :param validate: if False, validation errors are logged instead
                         of raised
        """
        log.info("Expand | Checking nodes for sub-pipelines")
        check_fanout = True
        for node in self.topological_order():
            log.debug("Expand | Checking %s for sub-pipeline", node)
            # setup and render the subpipe node. We
            # do this so that local variables used in the pipeline
            # are rendered properly and the values are set accordingly
            #if hasattr(node._tool, 'pipeline'):
            node._tool.setup()
            # reapply the pipeline profile so it precedes the tool profile
            if node._pipeline_profile:
                node._pipeline_profile.update(node._job, overwrite=False)
                node._job.update(node._pipeline_profile)
            # make the nodes options absolute (Issue #38)
            node._tool.options.make_absolute(node._job.working_dir)
            _render_nodes(self, [node])
            node._tool.options.make_absolute(node._job.working_dir)
            sub_pipe = node._tool.pipeline()
            if sub_pipe is None:
                # not a sub-pipeline node
                continue
            # validate the sub-pipeline
            self._validate_node(node, silent=not validate)
            # merge the nodes jobs with the sub-pipeline nodes
            for sub_node in sub_pipe.nodes():
                node._job.apply_to_node(sub_node)
                sub_node._job.merge(node._job)
            # render and apply the nodes name as pipeline
            # name
            node._name = node.job._render_name()
            sub_pipe.name(node.name)
            node._pipeline_name = self._pipeline_name
            log.info("Expand | Expanding sub-pipeline from node %s", node)
            if sub_pipe.excludes:
                self.excludes.extend(sub_pipe.excludes)
            # recursive expansion; fanout is carried through so it is
            # only performed once down the chain
            check_fanout = sub_pipe.expand(validate=validate, _find_dup=False,
                                           _check_fanout=check_fanout)
            # apply pipeline profile
            node._job.apply_to_pipeline(sub_pipe)
            _render_jobs(sub_pipe, list(sub_pipe.nodes()))
            # find all nodes in the sub_pipeline
            # with no incoming edges and connect
            # them to the current nodes incoming nodes
            no_incoming = [n for n in sub_pipe.nodes()
                           if len(list(n.incoming())) == 0]
            no_outgoing = [n for n in sub_pipe.nodes()
                           if len(list(n.outgoing())) == 0]
            # add the sub_pipe
            log.info("Expand | Adding %d nodes from sub-pipeline",
                     len(sub_pipe))
            for sub_node in sub_pipe.topological_order():
                log.debug("Expand | Adding sub-pipeline node %s", sub_node)
                self.add(sub_node)
            self._edges = self._edges.union(sub_pipe._edges)
            for inedge in node.incoming():
                for target in no_incoming:
                    log.debug("Expansion | add edge dependency on "
                              "no-incoming edge %s->%s",
                              inedge._source, target)
                    self.add_edge(inedge._source, target)
            for outedge in node.outgoing():
                for source in no_outgoing:
                    log.debug("Expansion | add edge dependency on "
                              "no-outgoing edge %s->%s",
                              source, outedge._target)
                    self.add_edge(source, outedge._target)
            # establish links between resolved nodes and the current pipeline
            # where before, the node was linked against a pipeline option.
            #
            # we look for both incoming and outgoing edges of the old node
            # and check their links. If we find a link where source/target
            # option is in one of the new sub_nodes _pipeline_options, we
            # reestablish the link between the options, now linking between
            # the nodes
            self._expand_subpipe_resolve_outgoing(node, sub_pipe)
            self._expand_subpipe_resolve_incoming(node, sub_pipe)
            # setup the node and render values before we remove the node
            node._tool.setup()
            # reapply the pipeline profile so it precedes the tool profile
            if node._pipeline_profile:
                node._pipeline_profile.update(node._job, overwrite=False)
                node._job.update(node._pipeline_profile)
            _create_render_context(self, node._tool, node)
            #_render_nodes(self, [node])
            self.remove(node, remove_links=False)
            self._cleanup_nodes.extend(sub_pipe._cleanup_nodes)
def _expand_subpipe_resolve_outgoing(self, node, sub_pipe):
"""Find outgoing edges from the sub pipe node that link
to nodes outside of the sub-pipe and are used in nodes inside the
sub-pipeline. If such edge/options combination exists, add an edge
with link from the node in the sub-pipeline to the node outside of
the sub-pipeline.
:param node: the sub pipeline parent node that is expanded
:param sub_pipe: the sub pipeline
"""
for outedge in node.outgoing():
for link in outedge._links:
self._expand_subpipe_resolve_outgoing_edge(
outedge, link, sub_pipe
)
def _expand_subpipe_resolve_incoming(self, node, sub_pipe):
"""Find incoming edges from the sub pipe node that link
to nodes outside of the sub-pipe and are used in nodes inside the
sub-pipeline. If such edge/options combination exists, add an edge
with link from the node in the sub-pipeline to the node outside of
the sub-pipeline.
:param node: the sub pipeline parent node that is expanded
:param sub_pipe: the sub pipeline
"""
for inedge in node.incoming():
for link in inedge._links:
self._expand_subpipe_resolve_incoming_edge(
inedge, link, sub_pipe
)
    def _expand_subpipe_resolve_outgoing_edge(self, outedge, link, sub_pipe):
        """Re-create one outgoing link of an expanded sub-pipeline node.

        Searches the sub-pipeline for nodes whose pipeline options
        reference the link's source option and adds an edge plus link
        from each such node to the original edge's target.

        :param outedge: outgoing edge of the expanded node
        :param link: the (source_option, target_option, stream) link
        :param sub_pipe: the sub pipeline
        """
        stream = link[2]
        for sub_node in sub_pipe.nodes():
            # find nodes who have _pipeline_options set
            for po in sub_node._pipeline_options:
                if po['source_option'] == link[0]:
                    edge = self.add_edge(sub_node, outedge._target)
                    source_option = sub_node._tool.options[po['option'].name]
                    target_option = link[1]
                    # update target option raw values
                    # NOTE(review): `vs` is built but never assigned
                    # back to the option — this looks like dead code or
                    # a missing `target_option._value = vs`; confirm the
                    # intent before removing or completing it
                    vs = []
                    for current in target_option._value:
                        if current == source_option:
                            vs.append(target_option)
                        else:
                            vs.append(current)
                    edge.add_link(source_option, target_option, stream)
    def _expand_subpipe_resolve_incoming_edge(self, inedge, link, sub_pipe):
        """Re-create one incoming link of an expanded sub-pipeline node.

        Searches the sub-pipeline for nodes whose pipeline options
        reference the link's target option and adds an edge plus link
        from the original edge's source to each such node.

        :param inedge: incoming edge of the expanded node
        :param link: the (source_option, target_option, stream) link
        :param sub_pipe: the sub pipeline
        """
        stream = link[2]
        for sub_node in sub_pipe.nodes():
            # find nodes who have _pipeline_options set
            for po in sub_node._pipeline_options:
                if po['source_option'] == link[1]:
                    edge = self.add_edge(inedge._source, sub_node)
                    target_option = sub_node._tool.options[po['option'].name]
                    source_option = link[0]
                    # update target option raw values
                    # NOTE(review): `vs` is built but never assigned
                    # back to the option — this looks like dead code or
                    # a missing `target_option._value = vs`; confirm the
                    # intent before removing or completing it
                    vs = []
                    for current in target_option._value:
                        if current == source_option:
                            vs.append(target_option)
                        else:
                            vs.append(current)
                    edge.add_link(source_option, target_option, stream)
def _expand_name_jobs_by_context(self):
"""If utils and a global context are available, apply variable
names to all nodes without names
"""
if self.utils and self.utils._global_env:
log.info("Expand | Applying node names from context")
for k, v in self.utils._global_env.iteritems():
if isinstance(v, Node):
if v._job.name is None:
v._job.name = k
    def _expand_merge_duplicates(self):
        """Find nodes that reference the same tool and are configured
        in the same way and merge them.
        """
        log.info("Expand | Searching for duplicates in %d nodes %d edges",
                 len(self), len(self._edges))
        # Filter for nodes with no incoming stream and store them in
        # a cache where we can retrieve all nodes that reference
        # the same tool quickly.
        #
        # To avoid complex comparisons between the options instances
        # between the nodes, we cache an options hash value for each node
        # here and use that one later for the comparison between
        # possible merge candidates
        node_hashes = {}
        tools_2_nodes = collections.defaultdict(set)
        for n in self.nodes():
            if not n.has_incoming_stream():
                opt_set = n._tool.options._get_value_set()
                node_hashes[n] = hash(opt_set)
                tools_2_nodes[n._tool._name].add(n)
        ## index all nodes without an incoming stream by their tool name
        merged = 0
        for nodes in [n for k, n in tools_2_nodes.iteritems()
                      if len(n) > 1]:
            # the nodes set contains all nodes that reference the
            # same tool
            # Group them by checking that their options are the same
            # hence its the same tool with the same configuration
            while nodes:
                n = nodes.pop()
                group = set([n] + [m for m in nodes if m != n and
                                   node_hashes[n] == node_hashes[m]])
                # remove the group from the node set
                # and add it to the option groups that will be merged
                nodes = nodes - group
                size = len(group)
                if size > 1:
                    log.info("Expand | Merging node group with %d nodes", size)
                    merged += size
                    self._merge_all(group)
        log.info("Expand | Merged %d nodes", merged)
    def _merge_all(self, nodes):
        """Merge all nodes in the given node list.

        One node survives; all edges of the other nodes are re-attached
        to it and the duplicates are removed from the graph.

        :param nodes: set of nodes to merge
        :returns: the surviving node
        """
        n1 = nodes.pop()
        for n2 in nodes:
            for n2_edge in n2._edges:
                if n2_edge._source == n2:
                    ## OUTGOING EDGE
                    # get the other side
                    target = n2_edge._target
                    # change all incoming edge sources to n1,
                    # dropping duplicates that may arise from the swap
                    new_edge_set = []
                    for e in target._edges:
                        if e._source == n2:
                            e._source = n1
                        if not e in new_edge_set:
                            new_edge_set.append(e)
                    target._edges = new_edge_set
                    # set this edge source to n1
                    n2_edge._source = n1
                else:
                    ## INCOMING EDGE
                    # get the source side
                    source = n2_edge._source
                    # change all outgoing edge targets to n1,
                    # dropping duplicates that may arise from the swap
                    new_edge_set = []
                    for e in source._edges:
                        if e._target == n2:
                            e._target = n1
                        if not e in new_edge_set:
                            new_edge_set.append(e)
                    source._edges = new_edge_set
                    # set this edge target to n1
                    n2_edge._target = n1
                # if such edge does not exist, add it to n1
                if not n2_edge in n1._edges:
                    n1._edges.append(n2_edge)
            # reset edges and remove the node
            n2._edges = []
            self.remove(n2)
        # re-apply the name so the index of remaining same-named nodes
        # is updated
        self._apply_node_name(n1, n1._name)
        return n1
def _validate_node(self, node, silent=False):
"""Validate the node and only raise an exaption
if silent is False
"""
try:
log.info("Pipeline | Validating node %s", node)
node._tool.validate()
except Exception as err:
if not silent:
raise
else:
log.debug("Node validation failed, but validation is "
"disabled: %s", err)
def validate(self):
"""Validate all nodes in the graph"""
log.info("Pipeline | Validating all nodes")
for n in self.nodes():
n._tool.validate()
def _update_cleanup_nodes(self):
for node in self._cleanup_nodes:
temp_outputs = set([])
for temp_node in [n for n in node.parents() if n._job.temp]:
for outfile in temp_node._tool.get_output_files():
temp_outputs.add(outfile)
node.files = list(temp_outputs)
def _fan_out(self, node, options):
    """Fan-out the given node using the given options.

    This will remove the node from the graph, clone it once
    for each option value and re-add the clones.

    :param node: the node to fan out
    :param options: the options whose multiple values drive the fan-out
    """
    # get all edges of the node and
    # a list of list of values on which we fanout the
    # node
    _edges = list(node._edges)
    # NOTE(review): assumes `options` is non-empty -- values[0] below
    # would raise an IndexError otherwise; confirm callers guarantee it
    values = [o.expand() for o in options]
    log.info("Fanout | %s with %d options %d values",
             node, len(options), len(values[0]))
    incoming_links = []
    incoming_edges = []
    incoming_links_set = set([])
    # collect incoming edges to the fan node that are covered
    # by fan_options. We will not add them explicitly but
    # one for each new clone
    for e in _edges:
        if e._target == node:
            for link in e._links:
                # link[1] is the link's target option
                if link[1] in options and not link in incoming_links_set:
                    incoming_links.append(link)
                    incoming_edges.append(e)
                    incoming_links_set.add(link)
    log.debug("Fanout | incoming edges: %s", incoming_edges)
    log.debug("Fanout | incoming values: %s", values)
    # edges not covered by the fan-out options get copied to every clone
    need_to_clone_edges = [e for e in _edges if not e in incoming_edges]
    # clone the tool
    for i, opts in enumerate(zip(*values)):
        log.debug("Fanout | Clone node: %s", node)
        cloned_tool = node._tool.clone()
        # Add the cloned tool to the current graph
        cloned_node = self.add(cloned_tool, _job=node._job)
        cloned_node._pipeline = node._pipeline
        log.debug("Fanout | Added new node: %s", cloned_node)
        # reattach all edge that are not part of the fanout
        # and copy the links. We will resolve the incoming edges
        # in the next step
        for edge in need_to_clone_edges:
            self._fanout_add_edge(edge, node, cloned_node)
        # now apply the options and create the incoming edges
        for j, option in enumerate(options):
            # NOTE(review): incoming_edges is indexed by the clone index
            # `i`, not the option index `j` -- looks deliberate (one
            # covered edge per clone) but verify against callers
            if i < len(incoming_edges):
                e = incoming_edges[i]
                new_edge = self.add_edge(e._source, cloned_node)
                new_edge._group = e._group
                for link in e._links:
                    link = new_edge.add_link(
                        link[0],
                        cloned_tool.options[link[1].name]
                    )
                    log.debug("Fanout | add link from inedge to edge: "
                              "%s [%s]", link, new_edge)
            # get the new value
            o = opts[j]
            allow_stream = False
            cloned_node.set(option.name, o, set_dep=False,
                            allow_stream=allow_stream)
            #cloned_node._tool.options[option.name]._index = i
            ost = str(o) if not isinstance(o, Option) else o._value
            log.debug("Fanout | apply value %s: %s=%s", cloned_node,
                      option.name, ost)
            ooo = cloned_node._tool.options[option.name]
            ooo.dependency = option.dependency
            ooo._index = i
    #node._tool.setup()
    _create_render_context(self, node._tool, node, None)
    self.remove(node)
def _fanout_add_edge(self, edge, node, cloned_node):
    """Re-add edges to a cloned node.

    Copies the given edge of the original ``node`` onto ``cloned_node``,
    recreating the option links so they point at the clone's options.

    :param edge: the original edge to copy
    :param node: the node being fanned out
    :param cloned_node: the clone that receives the copied edge
    """
    cloned_tool = cloned_node._tool
    if edge._source == node:
        # Outgoing edge
        new_edge = self.add_edge(cloned_node, edge._target)
        new_edge._group = edge._group
        for link in edge._links:
            # `link` is rebound to the freshly created link tuple
            link = new_edge.add_link(
                cloned_tool.options[link[0].name],
                link[1]
            )
            # link[1] is the target option: register the clone's source
            # option as one of its values
            link[1]._value.append(cloned_tool.options[link[0].name])
            log.debug("Fanout | add link to edge: %s [%s]",
                      new_edge, link)
    elif edge._target == node:
        # Incoming edge
        new_edge = self.add_edge(edge._source, cloned_node)
        new_edge._group = edge._group
        for link in edge._links:
            link = new_edge.add_link(
                link[0],
                cloned_tool.options[link[1].name]
            )
            log.debug("Fanout | add link to edge: %s [%s]",
                      link, new_edge)
def _get_fanout_options(self, node):
    """Find a list of options in the tool that take a single value
    but are populated with more than one value.

    :param node: the node whose tool options are inspected
    :returns: list of options that trigger a fan-out
    """
    if not isinstance(node._tool, Tool):
        return []
    # materialize the result as a list: callers index into it and take
    # len() of it, which would fail with filter()'s iterator on Python 3
    return [o for o in node._tool.options
            if not o.is_list() and len(o) > 1]
def _check_fanout_options(self, node, fanout_options):
    """Takes a source node and a list of fanout options and
    raises a ValueError if the fanout options do not contain the
    same number of elements.

    :param node: the node that is about to be fanned out
    :param fanout_options: the options considered for the fan-out
    :raises ValueError: if the options carry differing numbers of values
    """
    if not fanout_options:
        return
    num_values = len(fanout_options[0])
    if not all(num_values == len(i) for i in fanout_options):
        option_names = ["%s(%d)" % (o.name, len(o))
                        for o in fanout_options]
        # fixed message typo: "differers" -> "differs"
        raise ValueError("Unable to fan out node '%s'! The number of "
                         "options used for fan out differs: %s" %
                         (node, ", ".join(option_names)))
def _dfs(self, node, visited=None):
    """Collect all nodes reachable from ``node`` by following both
    child and parent edges.

    :param node: the start node
    :param visited: optional set of already visited nodes
    :returns: the set of all reachable nodes (including ``node``)
    """
    if visited is None:
        visited = set([])
    pending = [node]
    while pending:
        current = pending.pop()
        if current in visited:
            continue
        visited.add(current)
        pending.extend(current.children())
        pending.extend(current.parents())
    return visited
def _index_components(self):
    """Compute connected components of the graph and (re)build the
    node -> component index mapping in ``self._component_index``.

    :returns: list of components, each a set of nodes
    """
    all_nodes = set(self.nodes())
    self._component_index = {}
    components = []
    for n in all_nodes:
        # NOTE(review): _dfs is started from every node with a fresh
        # visited set, so a connected component is appended once per
        # member node and the index entries are overwritten each time --
        # presumably only the final grouping in _component_index
        # matters; confirm against the callers.
        c = self._dfs(n)
        if len(c) > 0:
            components.append(c)
            idx = len(components)
            for nc in c:
                self._component_index[nc] = idx
    return components
def __repr__(self):
    """Readable dump of the graph's node and edge lists."""
    return "[Nodes: {0}, Edges: {1}]".format(str(self._nodes),
                                             str(self._edges))
def _update_node_options(cloned_node, pipeline):
    """Render out all the options of the given node.

    Builds a render context from the node's options plus the node and
    pipeline themselves, applies it, forces every option value to be
    re-materialized, and finally clears the context again.
    """
    options = cloned_node._tool.options
    render_ctx = dict((opt.name, opt) for opt in options)
    render_ctx['__node__'] = cloned_node
    render_ctx['__pipeline__'] = pipeline
    log.info("Expand | Rendering node options for : %s", cloned_node)
    options.render_context(render_ctx)
    # re-assign each value so the rendered result is materialized
    for opt in options:
        opt.value = opt.value
    options.render_context(None)
class Node(object):
    """A single node in the pipeline graph.

    If the node is linked to a :class:`jip.tools.Tool` instance, attributes are
    resolved using the tools options and the :class:`jip.options.Option`
    instances are returned. This mechanism is used to automatically create
    edges between tools when their options are referenced. These links are
    stored on the :class:`.Edge`. If no edge exists, one will be created.
    """
    def __init__(self, tool, graph, index=-1):
        # all attributes are written through __dict__ directly because
        # __setattr__ delegates unknown names to the tool options
        self.__dict__['_tool'] = tool
        self.__dict__['_job'] = graph._current_job()
        self.__dict__['_graph'] = graph
        self.__dict__['_name'] = graph._name
        self.__dict__['_pipeline'] = graph._name
        self.__dict__['_pipeline_profile'] = None
        self.__dict__['_pipeline_name'] = None
        self.__dict__['_index'] = index
        # the _node_index is an increasing counter that indicates
        # the order in which nodes were added to the pipeline graph
        self.__dict__['_node_index'] = 0
        self.__dict__['_edges'] = []
        self.__dict__['_pipeline_options'] = []
        self.__dict__['_additional_input_options'] = set([])
        self.__dict__['_embedded'] = []

    def __getstate__(self):
        # the graph is not pickled; the tool is stored by name and its
        # options are carried along separately
        data = self.__dict__.copy()
        del data['_graph']
        data['_tool'] = self._tool.name
        data['_options'] = self._tool.options
        for opt in self._pipeline_options:
            opt['source'] = None
        return data

    def __setstate__(self, data):
        opts = data['_options']
        del data['_options']
        self.__dict__.update(data)
        # resolve the tool by name and re-attach the pickled options
        tool = jip.find(data['_tool'])
        self.__dict__['_tool'] = tool
        tool._options = opts
        tool._options.source = tool
        for o in tool._options:
            o.source = tool

    def has_incoming_stream(self):
        """Return True if any incoming edge carries a streaming link."""
        for e in self.incoming():
            for l in e._links:
                if l[2]:
                    return True
        return False

    @property
    def job(self):
        """The nodes job profile

        :getter: Returns the nodes job profile
        :type: :class:`jip.pipelines.Job`
        """
        return self._job

    def on_success(self, tool=None, **kwargs):
        """Create an embedded pipeline that will be submitted
        or executed after this node was successfully executed. The
        function returns a tuple: (pipeline, node)

        :param tool: the tool to run
        :param kwargs: option arguments for the tool
        :returns: tuple of (pipeline, node)
        """
        pipeline = Pipeline(cwd=self._graph._cwd)
        job = self._graph.job()
        job._node = None
        job._pipeline = pipeline
        job._in_pipeline_name = None
        pipeline._job = job
        pipeline._current_job = job
        self._embedded.append(pipeline)
        if tool:
            node = pipeline.run(tool, **kwargs)
            return pipeline, node
        else:
            return pipeline

    def pipeline_name(self, name):
        """Set the user defined name for the pipeline this node belongs to
        """
        if name is None:
            return
        # write through __dict__; the previous implementation assigned a
        # plain local variable and silently discarded the name
        self.__dict__['_pipeline_name'] = name

    @property
    def name(self):
        """Get a unique name for this node.

        The unique name is created based on the job name. If no job name
        is assigned, the tool name is used. If the new node name is not
        unique within the pipeline context, the nodes index is appended to
        the node.

        :getter: returns a unique name for this node
        :type: string
        """
        name = self._name
        if self._index >= 0:
            return ".".join([name, str(self._index)])
        return name

    def children(self):
        """Yields a list of all children of this node

        :returns: generator for all child nodes
        :rtype: generator for :class:`Node`
        """
        for edge in [e for e in self._edges if e._source == self]:
            yield edge._target

    def parents(self):
        """Yields a list of all parent nodes

        :returns: generator for all parent nodes
        :rtype: generator for :class:`Node`
        """
        for edge in [e for e in self._edges if e._target == self]:
            yield edge._source

    def outgoing(self):
        """Yields all outgoing edges of this node

        :returns: generator for all outgoing edges
        :rtype: generator for :class:`Edge`
        """
        for edge in [e for e in self._edges if e._source == self]:
            yield edge

    def incoming(self):
        """Yields all incoming edges of this node

        :returns: generator for all incoming edges
        :rtype: generator for :class:`Edge`
        """
        for edge in [e for e in self._edges if e._target == self]:
            yield edge

    def has_incoming(self, other=None, link=None, stream=None, value=None):
        """Returns true if this node has an incoming edge where
        the parent node is the given ``other`` node.

        If *link* is specified, it has to be a tuple with the source
        and the target option names. If specified the detected edge
        has to carry the specified link. If ``stream`` is not None
        the link is checked if its a streaming link or not.
        If no other node is specified this returns True if this node
        has any incoming edges.
        If ``value`` is specified, the delegate value has to be equal to
        the specified value.

        You can use the incoming edge check like this::

            node.has_incoming(other, ('output', 'input'), False, "data.txt")

        This return True if the node ``node`` has an incoming edge from
        the ``other`` node, the edge links ``other.output`` to ``node.input``,
        no stream is passed and the actual value is "data.txt".

        :param other: the potential parent node
        :type other: :class:`Node`
        :param link: optional tuple with source and target option names
        :param stream: boolean that ensures that the link is streaming
                       or not, depending on the specified value
        :param value: specify an optional value that is compared against the
                      delegated value
        :returns: True if the edge exists
        """
        if other is None:
            return len(list(self.incoming())) > 0
        edges = []
        for i in self.incoming():
            if i._source == other:
                edges.append(i)
        if not link:
            return len(edges) > 0

        def check_value(opt):
            if value is None:
                return True
            return opt.raw() == value
        # check for the link
        for e in edges:
            for l in e._links:
                if l[0].name == link[0] and l[1].name == link[1]:
                    if stream is not None:
                        if stream != l[2]:
                            return False
                        return check_value(l[0])
                    else:
                        return check_value(l[0])
        return False

    def has_outgoing(self, other=None, link=None, stream=None, value=None):
        """Returns true if this node has an outgoing edge where
        the child node is the given ``other`` node.

        If *link* is specified, it has to be a tuple with the source
        and the target option names. If specified the detected edge
        has to carry the specified link. If ``stream`` is not None
        the link is checked if its a streaming link or not.
        If no other node is specified this returns True if this node
        has any outgoing edges.
        If ``value`` is specified, the delegate value has to be equal to
        the specified value

        You can use the outgoing edge check like this::

            node.has_outgoing(other, ('output', 'input'), False, "data.txt")

        This return True if the node ``node`` has an outgoing edge to
        the ``other`` node, the edge links ``node.output`` to ``other.input``,
        no stream is passed and the actual value is "data.txt".

        :param other: the potential child node
        :type other: :class:`Node`
        :param link: optional tuple with source and target option names
        :param stream: boolean that ensures that the link is streaming
                       or not, depending on the specified value
        :param value: specify an optional value that is compared against the
                      delegated value
        :returns: True if the edge exists
        """
        if other is None:
            return len(list(self.outgoing())) > 0
        # delegate to the child's incoming-edge check with roles reversed
        return other.has_incoming(other=self, link=link, stream=stream,
                                  value=value)

    def get_stream_input(self):
        """Returns a tuple of an options and a node, where the
        options supports streams and the node is a parent node of this node. If
        no such combination exists, a tuple of ``(None, None)`` will be
        returned.

        :returns: tuple of ``(Option, Node)`` where the option supports
                  streaming and the Node is a parent node.
        """
        for inedge in self.incoming():
            l = inedge.get_streaming_link()
            if l is not None:
                return l[1], inedge._source
        return None, None

    def get_incoming_link(self, option):
        """Find a link in the incoming edges where the target option
        is the given option

        :param option: the option to search for
        :type option: :class:`jip.options.Option`
        :returns: link instance for the given option or None
        """
        for inedge in self.incoming():
            for link in inedge._links:
                if link._target == option:
                    return link
        return None

    def get_outgoing_link(self, option):
        """Find a link in the outgoing edges where the source option
        is the given option

        :param option: the option to search for
        :type option: :class:`jip.options.Option`
        :returns: link instance for the given option or None
        """
        for edge in self.outgoing():
            for link in edge._links:
                if link._source == option:
                    return link
        return None

    def depends_on(self, *args):
        """Add an explicit dependency between this node and the other
        node. The function accepts multiple values so you can specify multiple
        parents at once.

        :param args*: all parent nodes.
        :type other: :class:`Node`
        """
        for other in args:
            self._graph.add_edge(other, self)

    def group(self, other):
        """Groups this node and the other node. This creates a dependency
        between this node and the other nodes and enables grouping so the
        two nodes will be executed in the same job. The other node is returned
        so group chains can be created easily.

        :param other: the child node
        :type other: Node
        :returns other: the other node
        """
        e = self._graph.add_edge(self, other)
        e._group = True
        return other

    def _remove_edge_to(self, child):
        # drop the first edge pointing at `child` from this node, from
        # the child, and from the graph's edge list
        edge = None
        for e in self._edges:
            if e._target == child:
                edge = e
                break
        if edge:
            self._edges.remove(edge)
            edge._target._remove_edge_from(self)
            self._graph._edges.remove(edge)

    def _remove_edge_from(self, parent):
        # drop the first edge coming from `parent`; the graph's edge
        # list is handled by _remove_edge_to
        edge = None
        for e in self._edges:
            if e._source == parent:
                edge = e
                break
        if edge:
            self._edges.remove(edge)
            #self._graph._edges.remove(edge)

    ####################################################################
    # Operators
    ####################################################################
    def __or__(self, other):
        """Create an edge from this node to the other node and
        pipe the default output/input options between this node
        and the other
        """
        out = self._tool.options.get_default_output()
        if isinstance(other, _NodeProxy):
            for o in other._nodes:
                self.__or__(o)
        else:
            inp = other._tool.options.get_default_input()
            if out is not None and inp is not None:
                other._set_option_value(inp, out, allow_stream=True,
                                        append=inp.is_list())
            else:
                # just add an edge
                self._graph.add_edge(self, other)
        return other

    def __gt__(self, other):
        dout = self._tool.options.get_default_output()
        if isinstance(other, Node):
            # get the other tools input option and reverse
            # the set so the dependency is established in the
            # right direction
            def_in = other._tool.options.get_default_input()
            log.debug("op node > :: %s->%s [%s<-%s]",
                      self, other, dout.name, def_in.name)
            other.set(def_in.name, dout, allow_stream=False)
            return other
        # if the right hand side is an option, set
        # the default output to the specified option
        # and add a dependency. We have to call set on the
        # other node in order to create the dependency in
        # the right direction
        if isinstance(other, Option):
            if other.source in self._graph._nodes:
                node = self._graph._nodes.get(other.source, None)
                log.debug("op option > :: %s->%s [%s<-%s]",
                          self, node, dout.name, other.name)
                node.set(other.name, dout)
                return node
        if dout is not None:
            log.debug("op > :: %s(%s,%s)", self, dout.name, other)
            self.set(dout.name, other)
        else:
            # fixed: the tool name is now interpolated into the message
            # instead of being passed as a second ValueError argument
            raise ValueError("Unknown default output for %s" % self._tool)
        return self

    def __lt__(self, other):
        din = self._tool.options.get_default_input()
        if isinstance(other, Node):
            # get the other tools output option and reverse
            # the set so the dependency is established in the
            # right direction
            def_out = other._tool.options.get_default_output()
            log.debug("op node < :: %s->%s [%s->%s]",
                      other, self, din.name, def_out.name)
            self.set(din.name, def_out)
            return other
        # if the right hand side is an option, set
        # the default output to the specified option
        # and add a dependency. We have to call set on the
        # other node in order to create the dependency in
        # the right direction
        if isinstance(other, Option):
            if other.source in self._graph._nodes:
                node = self._graph._nodes.get(other.source, None)
                log.debug("op option < :: %s->%s [%s->%s]",
                          node, self, din.name, other.name)
                self.set(din.name, other)
                return node
        if din is not None:
            log.debug("op < :: %s(%s,%s)", self, other, din.name)
            self.set(din.name, other)
        else:
            # fixed copy/paste message: this branch handles the default
            # *input*, and the tool name is interpolated properly
            raise ValueError("Unknown default input for %s" % self._tool)
        return self

    def __and__(self, other):
        """Create an edge from this node to the other node. No
        options are passed.
        """
        if isinstance(other, _NodeProxy):
            for o in other._nodes:
                self.__and__(o)
        else:
            # just add an edge
            self._graph.add_edge(self, other)
        return other

    def __lshift__(self, other):
        """Create an edge from the other node to this node. No
        options are delegated.
        """
        if isinstance(other, _NodeProxy):
            for o in other._nodes:
                self.__lshift__(o)
        elif isinstance(other, Node):
            self._graph.add_edge(other, self)
        else:
            return self.__lt__(other)
        return self

    def __rshift__(self, other):
        """Create an edge from this node to the other node. No
        options are delegated.
        """
        if isinstance(other, _NodeProxy):
            for o in other._nodes:
                self.__rshift__(o)
        elif isinstance(other, Node):
            self._graph.add_edge(self, other)
        else:
            return self.__gt__(other)
        return other

    def __add__(self, other):
        if isinstance(other, _NodeProxy):
            other._nodes.append(self)
            return other
        return _NodeProxy([self, other])

    def __sub__(self, other):
        return self.group(other)

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        return isinstance(other, Node) and other._tool == self._tool

    def __hash__(self):
        return self._tool.__hash__()

    def __getattr__(self, name):
        """Resolves tool options"""
        if isinstance(self._tool, Tool):
            opts = self._tool.options
            opt = opts[name]
            if opt is None:
                raise AttributeError("Option '%s' not found in %s" %
                                     (name, self._tool))
            return opt
        raise AttributeError("Attribute not found: %s" % name)

    def set(self, name, value, set_dep=False, allow_stream=True, append=False):
        """Set an option"""
        opt = self.__getattr__(name)
        self._set_option_value(opt, value, set_dep=set_dep,
                               allow_stream=allow_stream,
                               append=append)

    def __setattr__(self, name, value):
        # internal attributes go straight to __dict__; everything else
        # is treated as a tool option assignment
        if name in ["_job", "_index", "_pipeline",
                    "_node_index", "_name", "_graph", "_edges", '_tool',
                    '_pipeline_profile', '_pipeline_name']:
            self.__dict__[name] = value
        else:
            self.set(name, value, allow_stream=False)

    def _set_option_value(self, option, value, append=False,
                          allow_stream=True, set_dep=True):
        if isinstance(value, (list, tuple)):
            # if the value is a list, set the value to None
            # first to clear the list and then append all
            # values in the list
            if not append:
                self._set_singleton_option_value(option, None,
                                                 set_dep=set_dep,
                                                 allow_stream=allow_stream)
            for single in value:
                self._set_singleton_option_value(option, single, append=True,
                                                 allow_stream=allow_stream,
                                                 set_dep=set_dep)
        else:
            self._set_singleton_option_value(option, value, append=append,
                                             allow_stream=allow_stream,
                                             set_dep=set_dep)

    def _set_singleton_option_value(self, option, value, append=False,
                                    allow_stream=True, set_dep=True):
        """Set a single value of the given options and create
        an edge and an option link on the edge if the value is another option
        or another node.

        :param option: the option of this nodes tool that will be updated
        :type option: jip.options.Option
        :param value: the new value
        :type value: object
        :param append: if set to true, the value is appended to the list of
                       values for the given option
        :type value: bool
        """
        def _force_stream(target, source):
            return target.streamable and \
                source.streamable and \
                source.is_stream()
        # single values
        if isinstance(value, Node):
            # in case the other value is a node, we try to
            # get the node tools default output option
            value = value._tool.options.get_default_output()
        if isinstance(value, Option):
            # the value is an options, we pass on the Options
            # value and create/update the edge
            option.dependency = True
            #new_value = value.raw() if not append else value.value
            new_value = value
            if allow_stream or _force_stream(option, value):
                if option.streamable and value.streamable:
                    # switch the value to the default
                    new_value = value.default
                    allow_stream = True
                else:
                    allow_stream = False
            if not append:
                option.set(new_value)
            else:
                # we do not append directly as we want the value checks to
                # happen
                option.append(new_value)
            # get the edge. The source is the values.source, which
            # references the other options tool
            edge = self._graph.add_edge(value.source, self._tool)
            if edge:
                edge.add_link(value, option, allow_stream=allow_stream)
            else:
                self._pipeline_options.append(
                    {"source": value.source, "option": option,
                     'source_option': value,
                     "stream": allow_stream}
                )
        else:
            if not append:
                if set_dep:
                    # clear the dependency flag (fixed typo: the previous
                    # code wrote to a stray 'dependecy' attribute)
                    option.dependency = False
                option.set(value)
            else:
                option.append(value)
class _NodeProxy(object):
    """Create groups of nodes and proxy all functions except for the
    pipe
    """
    def __init__(self, nodes):
        self._nodes = nodes

    def __or__(self, other):
        raise Exception("The | (pipe) operation is currently no supported "
                        "for a set of targets.")

    def __and__(self, other):
        # delegate the dependency edge to every grouped node
        for node in self._nodes:
            node.__and__(other)
        return other

    def __lshift__(self, other):
        for node in self._nodes:
            node.__lshift__(other)
        return other

    def __rshift__(self, other):
        for node in self._nodes:
            node.__rshift__(other)
        return self

    def __add__(self, other):
        return _NodeProxy([self, other])

    def __sub__(self, other):
        # fixed: iterate the internal ``_nodes`` list; the previous
        # implementation referenced a non-existent ``self.nodes``
        # attribute and raised an AttributeError
        for n in self._nodes:
            n.group(other)
        return other
class Edge(object):
    """An edge in the pipeline graph connecting source and target nodes.
    The edge has optional information about the jip.options.Options that
    are connected through this edge.

    The edge carries a set on links. Links are tuples of the form
    (source_option, target_option, streamable).

    In addition, the edges _group flag indicates that the two nodes linked
    by the edge should form a job group.
    """
    def __init__(self, source, target):
        self._source = source
        self._target = target
        self._links = set([])
        self._group = False

    def add_link(self, source_option, target_option, allow_stream=True):
        """Create an option link between the source option and the target
        options. This also checks that the source_option source is the
        same as the edges source._tool and the target_option source is
        the same as the edges target._tool

        :param source_option: the source option
        :type source_option: jip.options.Option
        :param target_option: the target option
        :type target_option: jip.options.Option
        :returns: the created link tuple
        :raises ValueError: if an option does not belong to the edge's tools
        """
        if not source_option.source == self._source._tool:
            raise ValueError("Linked source options.source != edge.source")
        if not target_option.source == self._target._tool:
            raise ValueError("Linked target option.source != edge.target")
        if source_option.is_stream() and not target_option.streamable:
            raise jip.tools.ValidationError(
                self._target._tool,
                "You are trying to establish a link between\n"
                "%s and %s, using %s.%s delegated to %s.%s.\n"
                "The source is a <<STREAM>> but the target "
                "does not accept streamed input!\n"
                "Try to set file name as output for %s." %
                (self._source, self._target,
                 self._source, source_option.name,
                 self._target, target_option.name,
                 self._source))
        # the third tuple element marks the link as streaming
        link = (source_option, target_option,
                allow_stream and source_option.streamable and
                target_option.streamable)
        log.debug("Add link on edge: %s->%s [%s->%s Stream:%s]",
                  self._source, self._target,
                  source_option.name, target_option.name, link[2])
        self._links.add(link)
        return link

    def remove_links(self):
        """Iterate the links associated with this edge and make sure that
        their values are unset in the target options.
        """
        for link in self._links:
            target_option = link[1]
            try:
                value = link[0].value
            except Exception:
                # narrowed from a bare except: fall back to the raw
                # value if accessing .value fails (e.g. during render)
                value = link[0]._value
            if not isinstance(value, (list, tuple)):
                value = [value]
            for v in value:
                if v in target_option._value:
                    i = target_option._value.index(v)
                    del target_option._value[i]
            # once no values remain, the target no longer depends on us
            if len(target_option._value) == 0:
                target_option.dependency = False

    def has_streaming_link(self):
        """Returns true if a least one link is set to streaming"""
        l = self.get_streaming_link()
        return l is not None

    def get_streaming_link(self):
        """Returns the first link that is set to streaming"""
        for l in self._links:
            if l[2]:
                return l
        return None

    def __eq__(self, other):
        return isinstance(other, Edge) and other._source == self._source \
            and other._target == self._target

    def __hash__(self):
        return hash((self._source, self._target))

    def __repr__(self):
        return "[%s->%s]" % (str(self._source), str(self._target))
def _create_render_context(pipeline, tool, node=None, nodes=None):
    """Build the template render context for a tool's options.

    The context starts from the optional node name map, adds all of the
    tool's options by name, lets the pipeline utils extend it, and is then
    attached to every option as its render context.

    :param pipeline: the pipeline providing the optional context utils
    :param tool: the tool whose options populate the context
    :param node: optional base node handed to the utils update
    :param nodes: optional mapping of node names to nodes used as the base
    :returns: the assembled context dictionary
    """
    context = dict(nodes) if nodes else {}
    # register every option of the tool under its name
    for option in tool.options:
        context[option.name] = option
    # give pipeline-level utilities a chance to extend the context
    if pipeline.utils:
        context = pipeline.utils._update_context(context, base_node=node)
    # attach the context to each option so later rendering can use it
    for option in tool.options:
        option.render_context = context
    return context
def _render_nodes(pipeline, nodes):
    """Render out all options of the given nodes.

    A render context is created and attached for every node, then each
    option of each node is rendered through :func:`_render_option`.
    """
    # base dict that maps all pipeline node names to their nodes
    name_map = dict((n.name, n) for n in pipeline.nodes())
    # create and register a context for every node up front
    contexts = {}
    for current in nodes:
        contexts[current] = _create_render_context(
            pipeline, current._tool, current, name_map)

    def _create(tool):
        # lazily build a context for options whose context is missing
        return _create_render_context(pipeline, tool, None, name_map)

    # render out all node options
    for current in nodes:
        for option in current._tool.options:
            _render_option(option, _create)
def _render_jobs(pipeline, nodes):
    """Render the job attributes (working directory and stdout/stderr
    paths) of the given nodes using their template contexts.
    """
    # base dict that maps all pipeline node names to their nodes
    name_map = dict((n.name, n) for n in pipeline.nodes())
    # create and register a context for every node
    contexts = {}
    for current in nodes:
        contexts[current] = _create_render_context(
            pipeline, current._tool, current, name_map)

    # render the job related templates for each node
    for current in nodes:
        ctx = contexts[current]
        # expose the job name and profile to the templates
        ctx['name'] = current._job.name
        ctx['job'] = current._job
        if current._job.dir:
            current._job.working_dir = render_template(current._job.dir,
                                                       **ctx)
        if current._job.out:
            current._job.out = render_template(current._job.out, **ctx)
        if current._job.err:
            current._job.err = render_template(current._job.err, **ctx)
def _render_option(option, create_fun):
    """Render a single option, lazily creating its render context via
    ``create_fun`` if it does not have one yet.
    """
    if option.render_context is None:
        create_fun(option.source)
    return option.value
| {
"content_hash": "0ab74f1accc9ee941c761cd366d96386",
"timestamp": "",
"source": "github",
"line_count": 2227,
"max_line_length": 79,
"avg_line_length": 38.935339021104625,
"alnum_prop": 0.5343966600929546,
"repo_name": "thasso/pyjip",
"id": "4aa4c0e26f8b9c3e062654d27b8693d160af92d1",
"size": "86731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jip/pipelines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8031"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "714627"
},
{
"name": "Shell",
"bytes": "800"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Select the Django settings module unless one is already configured
    # in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "supermilai.settings")
    # Imported here (not at module level) so the settings variable is in
    # place before Django's management machinery loads.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "65d6e271fb0c1b0ae7db4a919f2f3e9e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.7142857142857143,
"repo_name": "lambdauser/supermilai",
"id": "3c29d3c69ec82f0e14731a67b0d4df6c9e0c7e43",
"size": "253",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2518"
},
{
"name": "JavaScript",
"bytes": "93"
},
{
"name": "Python",
"bytes": "22881"
}
],
"symlink_target": ""
} |
import codecs
from collections import OrderedDict
import itertools
import logging
from typing import Dict, List
import tqdm
from .instances.instance import Instance, TextInstance, IndexedInstance
from .instances.background_instance import BackgroundInstance
from .instances.labeled_background_instance import LabeledBackgroundInstance
from .instances.multiple_true_false_instance import MultipleTrueFalseInstance
from .data_indexer import DataIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Dataset:
    """
    A collection of Instances.

    This base class has general methods that apply to all collections of Instances. That basically
    is just methods that operate on sets, like merging and truncating.
    """
    def __init__(self, instances: List[Instance]):
        """
        A Dataset just takes a list of instances in its constructor. It's important that all
        subclasses have an identical constructor to this (though possibly with different Instance
        types). If you change the constructor, you also have to override all methods in this base
        class that call the constructor, such as `merge()` and `truncate()`.
        """
        self.instances = instances

    def can_be_converted_to_multiple_choice(self):
        """
        This method checks that dataset matches the assumptions we make about question data: that
        it is a list of sentences corresponding to four-choice questions, with one correct answer
        for every four instances.

        So, specifically, we check that the number of instances is a multiple of four, and we check
        that each group of four instances has exactly one instance with label True, and all other
        labels are False (i.e., no None labels for validation data).
        """
        for instance in self.instances:
            if isinstance(instance, MultipleTrueFalseInstance):
                return False
        if len(self.instances) % 4 != 0:
            return False
        questions = zip(*[self.instances[i::4] for i in range(4)])
        for question in questions:
            question_labels = [instance.label for instance in question]
            label_counts = {x: question_labels.count(x) for x in set(question_labels)}
            # use .get() so a group without any True (or False) labels is
            # reported as not convertible instead of raising a KeyError
            if label_counts.get(True, 0) != 1:
                return False
            if label_counts.get(False, 0) != 3:
                return False
        return True

    def merge(self, other: 'Dataset') -> 'Dataset':
        """
        Combine two datasets. If you call try to merge two Datasets of the same subtype, you will
        end up with a Dataset of the same type (i.e., calling IndexedDataset.merge() with another
        IndexedDataset will return an IndexedDataset). If the types differ, this method currently
        raises an error, because the underlying Instance objects are not currently type compatible.
        """
        if type(self) is type(other):
            return self.__class__(self.instances + other.instances)
        else:
            raise RuntimeError("Cannot merge datasets with different types")

    def truncate(self, max_instances: int):
        """
        If there are more instances than `max_instances` in this dataset, returns a new dataset
        with the first `max_instances` instances (note: not a random subset, despite what older
        documentation claimed). If there are fewer than `max_instances` already, we just return
        self.
        """
        if len(self.instances) <= max_instances:
            return self
        new_instances = [i for i in self.instances]
        return self.__class__(new_instances[:max_instances])
class TextDataset(Dataset):
"""
A Dataset of TextInstances, with a few helper methods.
TextInstances aren't useful for much with Keras until they've been indexed. So this class just
has methods to read in data from a file and converting it into other kinds of Datasets.
"""
def __init__(self, instances: List[TextInstance]):
    """Create a TextDataset from a list of :class:`TextInstance` objects."""
    super(TextDataset, self).__init__(instances)
def to_indexed_dataset(self, data_indexer: DataIndexer) -> 'IndexedDataset':
    """
    Converts the Dataset into an IndexedDataset, given a DataIndexer.
    Progress is reported via tqdm while the instances are indexed.
    """
    progress = tqdm.tqdm(self.instances)
    indexed = [inst.to_indexed_instance(data_indexer) for inst in progress]
    return IndexedDataset(indexed)
def to_question_dataset(self) -> 'Dataset':
assert self.can_be_converted_to_multiple_choice()
questions = zip(*[self.instances[i::4] for i in range(4)])
question_instances = []
for question in questions:
question_instances.append(MultipleTrueFalseInstance(question))
return TextDataset(question_instances)
@staticmethod
def read_from_file(filename: str, instance_class, label: bool=None):
lines = [x.strip() for x in tqdm.tqdm(codecs.open(filename, "r",
"utf-8").readlines())]
return TextDataset.read_from_lines(lines, instance_class, label)
@staticmethod
def read_from_lines(lines: List[str], instance_class, label: bool=None):
instances = [instance_class.read_from_line(x, label) for x in lines]
labels = [(x.label, x) for x in instances]
labels.sort(key=lambda x: str(x[0]))
label_counts = [(label, len([x for x in group]))
for label, group in itertools.groupby(labels, lambda x: x[0])]
label_count_str = str(label_counts)
if len(label_count_str) > 100:
label_count_str = label_count_str[:100] + '...'
logger.info("Finished reading dataset; label counts: %s", label_count_str)
return TextDataset(instances)
@staticmethod
def read_background_from_file(dataset: 'TextDataset', filename: str, background_class):
"""
Reads a file formatted as background information and matches the background to the
sentences in the given dataset. The given dataset must have instance indices, so we can
match the background information in the file to the instances in the dataset.
The format for the file is assumed to be the following:
[sentence index][tab][background 1][tab][background 2][tab][...]
where [sentence index] corresponds to the index of one of the instances in `dataset`.
This code will also work if the data is formatted simply as [index][tab][sentence], one per
line.
"""
new_instances = OrderedDict()
for instance in dataset.instances:
background_instance = BackgroundInstance(instance, [])
new_instances[instance.index] = background_instance
for line in codecs.open(filename, "r", "utf-8"):
fields = line.strip().split("\t")
index = int(fields[0])
if index in new_instances:
instance = new_instances[index]
for sequence in fields[1:]:
instance.background.append(background_class.read_from_line(sequence, None))
return TextDataset(list(new_instances.values()))
@staticmethod
def read_labeled_background_from_file(dataset: 'TextDataset', filename: str) -> 'TextDataset':
"""
Reads a file formatted as labeled background information and matches the background to the
sentences in the given dataset. The given dataset must have instance indices, so we can
match the background information in the file to the instances in the dataset.
This is like read_background_from_file(), except we create LabeledBackgroundInstances
instead of BackgroundInstances.
The format for the file is assumed to be the following:
[sentence index][tab][correct background indices][tab][background 1][tab][background 2][tab][...]
where [sentence index] corresponds to the index of one of the instances in `dataset`, and
[correct background indices] is a comma-separated list of (0-indexed) integers, pointing to
the background sentences which are positive examples.
"""
new_instances = {}
for instance in dataset.instances:
background_instance = LabeledBackgroundInstance(instance, [], [])
new_instances[instance.index] = background_instance
for line in codecs.open(filename, "r", "utf-8"):
fields = line.strip().split("\t")
index = int(fields[0])
correct_background_indices = [int(x) for x in fields[1].split(',')]
if index in new_instances:
instance = new_instances[index]
instance.label = correct_background_indices
for sequence in fields[2:]:
instance.background.append(sequence)
return TextDataset(list(new_instances.values()))
class IndexedDataset(Dataset):
    """
    A Dataset of IndexedInstances, with some helper methods.

    IndexedInstances have had their text sequences replaced with lists of word
    indices, so they can be padded to consistent lengths and converted into
    training inputs.
    """
    def __init__(self, instances: List[IndexedInstance]):
        super(IndexedDataset, self).__init__(instances)

    def max_lengths(self):
        """
        Return a dict mapping each padding key to the maximum length observed
        for that key across all instances (empty dict for an empty dataset).
        """
        all_lengths = [instance.get_lengths() for instance in self.instances]
        if not all_lengths:
            return {}
        return {key: max(lengths.get(key, 0) for lengths in all_lengths)
                for key in all_lengths[0]}

    def pad_instances(self, max_lengths: Dict[str, int]=None):
        """
        Pad (and left-truncate where necessary) every IndexedInstance in this
        dataset so they share one length per padding dimension.

        For each dimension we use the caller-supplied value from ``max_lengths``
        when it is given and not None; otherwise we use the longest length
        observed across the instances. There can be several padding dimensions
        depending on the instance type, hence a dict rather than a single int.

        This method _modifies_ the current object; it does not return a new
        IndexedDataset.
        """
        logger.info("Getting max lengths from instances")
        instance_max_lengths = self.max_lengths()
        logger.info("Instance max lengths: %s", str(instance_max_lengths))
        lengths_to_use = {}
        for key, observed_length in instance_max_lengths.items():
            use_given_length = max_lengths and max_lengths[key] is not None
            lengths_to_use[key] = max_lengths[key] if use_given_length else observed_length
        logger.info("Now actually padding instances to length: %s", str(lengths_to_use))
        for instance in tqdm.tqdm(self.instances):
            instance.pad(lengths_to_use)

    def as_training_data(self):
        """
        Convert each IndexedInstance into (inputs, label) via its
        as_training_data() method and return the two parallel lists. Note that
        you might need to call numpy.asarray() on the results yourself; we
        don't do that here because the inputs might be complicated.
        """
        pairs = [instance.as_training_data() for instance in self.instances]
        inputs = [pair[0] for pair in pairs]
        labels = [pair[1] for pair in pairs]
        return inputs, labels
| {
"content_hash": "48515d14e8c92f1370ebc2836115c4fc",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 114,
"avg_line_length": 47.54618473895582,
"alnum_prop": 0.6524199679026945,
"repo_name": "RTHMaK/RPGOne",
"id": "e7302fd88d029b52b85b119b93149ea790f12b27",
"size": "11839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_qa-master/deep_qa/data/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Batchfile",
"bytes": "15029"
},
{
"name": "CSS",
"bytes": "41709"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Go",
"bytes": "287"
},
{
"name": "HTML",
"bytes": "633076"
},
{
"name": "JavaScript",
"bytes": "1128791"
},
{
"name": "Jupyter Notebook",
"bytes": "927247"
},
{
"name": "Makefile",
"bytes": "31756"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Matlab",
"bytes": "9454"
},
{
"name": "PHP",
"bytes": "708541"
},
{
"name": "PowerShell",
"bytes": "68503"
},
{
"name": "Python",
"bytes": "2278740"
},
{
"name": "Ruby",
"bytes": "1136"
},
{
"name": "Shell",
"bytes": "62555"
},
{
"name": "Smarty",
"bytes": "5752"
},
{
"name": "TeX",
"bytes": "34544"
}
],
"symlink_target": ""
} |
import docutils.nodes
from docutils.parsers.rst import Directive, directives
class IframeVideo(Directive):
    """
    Base reST directive that renders a video id as an embedded iframe.

    Subclasses supply ``html_code``, a format string with ``{video_id}``,
    ``{width}``, ``{height}`` and ``{align}`` placeholders. The single
    required argument is the video id; ``width``/``height``/``align`` options
    fall back to the class defaults below.
    """
    DEFAULT_WIDTH = 700
    DEFAULT_HEIGHT = 400
    DEFAULT_ALIGN = 'left'

    has_content = False
    required_arguments = 1
    optional_arguments = 0
    option_spec = {
        'height': directives.nonnegative_int,
        'width': directives.nonnegative_int,
        'align': lambda argument: directives.choice(argument, ('left', 'center', 'right')),
    }

    def run(self):
        opts = self.options
        # The positional argument is the video id, validated as a URI chunk.
        opts['video_id'] = directives.uri(self.arguments[0])
        opts.setdefault('width', self.DEFAULT_WIDTH)
        opts.setdefault('height', self.DEFAULT_HEIGHT)
        opts.setdefault('align', self.DEFAULT_ALIGN)
        rendered = self.html_code.format(**opts)
        return [docutils.nodes.raw('', rendered, format='html')]
class Youtube(IframeVideo):
    # HTML emitted for the ``youtube`` directive; the {align}, {width},
    # {height} and {video_id} placeholders are filled in by IframeVideo.run().
    html_code = '<div class="align-{align} responsive-embed widescreen media">' \
        '<iframe width="{width}" height="{height}" src="https://www.youtube.com/embed/{video_id}"' \
        ' frameborder="0" allowfullscreen></iframe></div>'
def register(context):
    """Plugin entry point: register the ``youtube`` reST directive.

    ``context`` is supplied by the plugin loader but is not used here.
    """
    directives.register_directive('youtube', Youtube)
| {
"content_hash": "2b97d32473d4f38ca07970463d1be902",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 108,
"avg_line_length": 34.526315789473685,
"alnum_prop": 0.6364329268292683,
"repo_name": "akrylysov/yozuch",
"id": "42ca519da69fb5212e052f4c76b8fcf8b66f697d",
"size": "1312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yozuch/plugins/video.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7088"
},
{
"name": "HTML",
"bytes": "41851"
},
{
"name": "Python",
"bytes": "63646"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, DateTime, String, Integer, Float, ForeignKey, func, BLOB, Text, BigInteger, Index, SmallInteger
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
# Single declarative base shared by the models in this module.
Base = declarative_base()


class DbDonations(Base):
    """
    Donations DB info.

    One row per donation, plus bookkeeping columns tracking whether (and when)
    the donation message was published to Twitter and Facebook.
    """
    __tablename__ = 'donations'

    id = Column(BigInteger, primary_key=True)
    created_at = Column(DateTime, nullable=True)
    received_at = Column(DateTime, nullable=True, index=True)
    donor = Column(String(255), nullable=True)
    amount = Column(Float, nullable=True)
    message = Column(Text, nullable=True)
    # Position in the source donation feed -- presumably a pagination cursor;
    # verify against the scraper code.
    page_idx = Column(Integer, nullable=False, default=0, index=True)
    # Unique identifier of the donation, used for de-duplication (indexed).
    uid = Column(String(64), nullable=True, index=True)
    # Twitter publishing state.
    skip_msg = Column(SmallInteger, nullable=False, default=0, index=True)
    published_at = Column(DateTime, nullable=True)
    publish_attempts = Column(Integer, nullable=False, default=0)
    publish_last_attempt_at = Column(DateTime, nullable=True)
    tweet_id = Column(String(64), nullable=True)
    # Facebook publishing state (mirrors the Twitter columns above).
    fb_skip_msg = Column(SmallInteger, nullable=False, default=0, index=True)
    fb_published_at = Column(DateTime, nullable=True)
    fb_publish_attempts = Column(Integer, nullable=False, default=0)
    fb_publish_last_attempt_at = Column(DateTime, nullable=True)
    fb_post_id = Column(String(92), nullable=True)
| {
"content_hash": "6190caee251ea29ee45e51205178d53c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 126,
"avg_line_length": 37.75675675675676,
"alnum_prop": 0.717967072297781,
"repo_name": "yolosec/zeman-parser",
"id": "da41734b41408dca12dace2b88164fd80f7bb8cf",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zemanfeed/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "64955"
}
],
"symlink_target": ""
} |
"""
Project MCM - Micro Content Management
Metadata Extractor - identify content type, extract metadata with specific filter plugins
Copyright (C) <2016> Tim Waizenegger, <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import datetime
import dateutil
import logging
from mcm.metadataExtractor import ImportFilter
# Prefix prepended to every generated database column name.
colprfx = "md_"
# Pseudo-filter describing the table that stores Swift's built-in per-object
# metadata headers (one row per object).
internal_table_objs = {"name": "SwiftInternal", "columns": ["content-type", "content-length", "last-modified"]}
# Pseudo-filter describing the per-container info table (SDOS-related
# container metadata headers).
internal_table_containers = {"name": "ContainerInfo", "columns": ["container-object-count", "container-bytes-used",
                                                                  "container-meta-sdosheight",
                                                                  "container-meta-sdosencryption",
                                                                  "container-meta-sdoskeycascade",
                                                                  "container-meta-sdosmasterkey",
                                                                  "container-meta-sdospartitionbits"]}
def extract_metadata_from_container(container_info):
    """
    Map a Swift container-info dict onto a flat dict of column values for the
    ContainerInfo table.

    :param container_info: dict of container headers as returned by Swift
        (keys like ``x-container-object-count``); must contain ``name``.
    :return: dict keyed by logical column name; missing headers map to None.
        Hyphens/underscores in keys are normalised to ``_`` later by
        execute_db_insert, so the mixed spellings below end up consistent.
    """
    sqlVals = {}
    sqlVals["extractionDate"] = datetime.datetime.now(dateutil.tz.tzutc())
    sqlVals["containerName"] = container_info["name"]
    # Container rows have no object name; keep the (primary-key) column present.
    sqlVals["name"] = ""
    sqlVals["container_object_count"] = container_info.get("x-container-object-count")
    sqlVals["container_bytes-used"] = container_info.get("x-container-bytes-used")
    # BUG FIX: this header was previously misspelled "x-container-meta-sosheight"
    # (missing the 'd'), so the sdosheight column was always empty.
    sqlVals["container-meta-sdosheight"] = container_info.get("x-container-meta-sdosheight")
    sqlVals["container-meta-sdosencryption"] = container_info.get("x-container-meta-sdosencryption")
    sqlVals["container-meta-sdoskeycascade"] = container_info.get("x-container-meta-sdoskeycascade")
    sqlVals["container-meta-sdosmasterkey"] = container_info.get("x-container-meta-sdosmasterkey")
    sqlVals["container-meta-sdospartitionbits"] = container_info.get("x-container-meta-sdospartitionbits")
    return sqlVals
def replicate_container_info(postgres_connection, container_info):
    """
    Upsert one ContainerInfo row per container described in ``container_info``.

    :param postgres_connection: open psycopg-style connection used for the inserts.
    :param container_info: iterable of Swift container-info dicts.
    """
    target_table = deriveTableName(internal_table_containers["name"])
    for single_container in container_info:
        row_values = extract_metadata_from_container(single_container)
        execute_db_insert(row_values, target_table, postgres_connection)
def extractMetadataFromObject(conn, containerName, objectName, filterName, filterTags):
    """
    HEAD one Swift object and collect the header value for each tag of a filter.

    :param conn: Swift connection offering ``head_object``.
    :param filterName: name of the metadata filter; for the internal
        SwiftInternal pseudo-filter the tags are raw Swift headers, otherwise
        they are namespaced as ``x-object-meta-filter-<filter>-<tag>``.
    :param filterTags: tag names to extract; absent headers become "".
    :return: dict of column values including extraction date, container and
        object name.
    """
    header = conn.head_object(container=containerName, obj=objectName, headers=None)
    sqlVals = {
        "extractionDate": datetime.datetime.now(dateutil.tz.tzutc()),
        "containerName": containerName,
        "name": objectName,
    }
    is_internal_filter = filterName == internal_table_objs["name"]
    for tag in filterTags:
        header_key = tag if is_internal_filter else 'x-object-meta-filter-{}-{}'.format(filterName, tag)
        sqlVals[tag] = header[header_key] if header_key in header else ""
    return sqlVals
def replicateMetadata(conn, containerName, objectName, objectType, postgreConn):
    """
    Replicate one Swift object's metadata into PostgreSQL.

    Always upserts the object's built-in Swift headers into the SwiftInternal
    table, then tries a content-type-specific filter and, if it extracted
    anything, upserts into that filter's table as well.

    :return: human-readable status message.
    """
    # always insert into SwiftInternal table
    sqlVals = extractMetadataFromObject(conn, containerName, objectName, internal_table_objs["name"],
                                        internal_table_objs["columns"])
    table = deriveTableName("SwiftInternal")
    execute_db_insert(sqlVals, table, postgreConn)
    # try to find content type specific filter
    thisFilter = ImportFilter.getFilterForObjType(objectType)
    # try to insert into content type specific table
    sqlVals = extractMetadataFromObject(conn, containerName, objectName, thisFilter.myName, thisFilter.myValidTagNames)
    # only insert into db if content type has been indentified and metadata has been extracted
    # NOTE(review): extractionDate/containerName/name are always present, and
    # unmatched tags default to "" (a 4th distinct value), so this > 3 check
    # may pass even when no tag actually matched -- verify the intent.
    if len(set(sqlVals.values())) > 3:
        table = deriveTableName(thisFilter.myName)
        execute_db_insert(sqlVals, table, postgreConn)
    return "Metadata of {} in {} was replicated".format(objectName, containerName)
def deriveTableName(filterName):
    """Return the database table name backing the given filter."""
    table_prefix = "filter_"
    return table_prefix + filterName
def execute_db_insert(sqlVals, tableName, postgreConn):
    """
    Upsert one metadata row into ``tableName``.

    Builds ``INSERT ... ON CONFLICT ON CONSTRAINT pk_<table> DO UPDATE`` with
    pyformat placeholders (``%(key)s``), so values are bound safely by the
    driver; only the table/column names (which come from internal filter
    definitions, not user input) are string-concatenated. Column names are the
    dict keys prefixed with ``colprfx`` and hyphens mapped to underscores,
    mirroring the CREATE TABLE helpers below.
    """
    fields = colprfx + (", " + colprfx).join(sqlVals.keys())
    fields = fields.replace("-", "_")
    # '%%(%s)s' renders to a named pyformat placeholder such as %(name)s.
    values = ', '.join(['%%(%s)s' % x for x in sqlVals])
    query = "INSERT INTO " + tableName + " " + (
        '(%s) VALUES (%s)' % (fields, values)) + " ON CONFLICT ON CONSTRAINT pk_" + tableName + " DO UPDATE SET "
    # On conflict, overwrite every column with the newly supplied value.
    updateSet = ""
    for field in sqlVals.keys():
        flabel = colprfx + field.replace("-", "_")
        updateSet += flabel + "=excluded." + flabel + ","
    query = query + updateSet.rstrip(',')
    # print("sql: {}".format(query))
    with postgreConn as conn:
        with conn.cursor() as cursor:
            cursor.execute(query, sqlVals)
    postgreConn.commit()
def createTablesIfAbsent(postgreConn):
    """
    Create the two internal metadata tables and one table per registered
    import filter, if they do not exist yet, then commit.
    """
    with postgreConn as conn:
        with conn.cursor() as cursor:
            for internal_def in (internal_table_objs, internal_table_containers):
                create_internal_table(cursor, internal_def)
            for import_filter in ImportFilter.mapping.values():
                create_filter_table(cursor, import_filter.myName, import_filter.myValidTagNames)
    postgreConn.commit()
def create_internal_table(cursor, table_def):
    """
    CREATE TABLE IF NOT EXISTS for one of the internal pseudo-filter tables.

    All columns are TEXT; (containerName, name) forms the primary key used by
    execute_db_insert's ON CONFLICT clause. Hyphens in column names are
    mapped to underscores, matching the insert path.
    """
    tableName = deriveTableName(table_def["name"])
    tags = table_def["columns"]
    constants = "{0}containerName TEXT, {0}name TEXT, {0}extractionDate TEXT, ".format(colprfx)
    cols = constants + colprfx + (" TEXT, " + colprfx).join(tags) + " TEXT"
    cols = cols.replace("-", "_")
    tableQuery = "CREATE TABLE IF NOT EXISTS " + tableName + " (" + cols + ", CONSTRAINT pk_" + tableName + " PRIMARY KEY ({0}containerName, {0}name)".format(
        colprfx) + ")"
    logging.info("Creating {}".format(tableName))
    # print("sql: {}".format(tableQuery))
    # cursor.execute("DROP TABLE "+tableName +" CASCADE")
    cursor.execute(tableQuery)
def create_filter_table(cursor, filterName, tags):
    """
    CREATE TABLE IF NOT EXISTS for a content-type filter's metadata table.

    Same layout as create_internal_table, plus a foreign key from
    (containerName, name) to the filter_SwiftInternal table so filter rows
    always reference a known object row.
    """
    tableName = deriveTableName(filterName)
    constants = "{0}containerName TEXT, {0}name TEXT, {0}extractionDate TEXT, ".format(colprfx)
    cols = constants + colprfx + (" TEXT, " + colprfx).join(tags) + " TEXT"
    cols = cols.replace("-", "_")
    fkey = ", FOREIGN KEY ({0}containerName, {0}name) REFERENCES filter_SwiftInternal ({0}containerName, {0}name)".format(
        colprfx)
    tableQuery = "CREATE TABLE IF NOT EXISTS " + tableName + " (" + cols + ", CONSTRAINT pk_" + tableName + " PRIMARY KEY ({0}containerName, {0}name)".format(
        colprfx) + fkey + ")"
    logging.info("Creating {}".format(tableName))
    # print("sql: {}".format(tableQuery))
    # cursor.execute("DROP TABLE "+tableName +" CASCADE")
    cursor.execute(tableQuery)
| {
"content_hash": "4cc005caa88460b72eb0ce638dd65171",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 158,
"avg_line_length": 44.796052631578945,
"alnum_prop": 0.6484065207813189,
"repo_name": "timwaizenegger/mcm-metadataExtractor",
"id": "53ca20c24e46175526783198e6b429fdf29813d9",
"size": "6843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mcm/metadataExtractor/Replicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47959"
},
{
"name": "Shell",
"bytes": "582"
}
],
"symlink_target": ""
} |
import logging
import platform
import time

log = logging.getLogger("graphitesend")


class GraphiteStructuredFormatter(object):
    '''Default formatter for GraphiteClient.

    Provides structured metric naming based on a prefix, system name, group, etc

    :param prefix: string added to the start of all metrics
    :type prefix: Default: "systems."
    :param group: string added to after system_name and before metric name
    :param system_name: FDQN of the system generating the metrics
    :type system_name: Default: current FDQN
    :param suffix: string added to the end of all metrics
    :param lowercase_metric_names: Toggle the .lower() of all metric names
    :param fqdn_squash: Change host.example.com to host_example_com
    :type fqdn_squash: True or False
    :param clean_metric_name: Does GraphiteClient needs to clean metric's name
    :type clean_metric_name: True or False

    Feel free to implement your own formatter as any callable that accepts
    def __call__(metric_name, metric_value, timestamp)
    and emits text appropriate to send to graphite's text socket.
    '''

    # (old, new) pairs applied in order by clean_metric_name().
    cleaning_replacement_list = [
        ('(', '_'),
        (')', ''),
        (' ', '_'),
        ('-', '_'),
        ('/', '_'),
        ('\\', '_')
    ]

    def __init__(self, prefix=None, group=None, system_name=None, suffix=None,
                 lowercase_metric_names=False, fqdn_squash=False, clean_metric_name=True):
        prefix_parts = []
        # An explicit empty string disables the component; None selects the default.
        if prefix != '':
            prefix = prefix or "systems"
            prefix_parts.append(prefix)
        if system_name != '':
            system_name = system_name or platform.uname()[1]
            if fqdn_squash:
                system_name = system_name.replace('.', '_')
            prefix_parts.append(system_name)
        if group is not None:
            prefix_parts.append(group)
        prefix = '.'.join(prefix_parts)
        prefix = prefix.replace('..', '.')  # remove double dots
        prefix = prefix.replace(' ', '_')  # Replace ' 'spaces with _
        if prefix:
            prefix += '.'
        self.prefix = prefix
        self.suffix = suffix or ""
        self.lowercase_metric_names = lowercase_metric_names
        self._clean_metric_name = clean_metric_name

    def clean_metric_name(self, metric_name):
        """
        Make sure the metric is free of control chars, spaces, tabs, etc.

        Cleaning can be disabled via the ``clean_metric_name`` constructor flag.
        """
        if not self._clean_metric_name:
            return metric_name
        for _from, _to in self.cleaning_replacement_list:
            metric_name = metric_name.replace(_from, _to)
        return metric_name

    def __call__(self, metric_name, metric_value, timestamp=None):
        '''Format a metric, value, and timestamp for use on the carbon text socket.'''
        # BUG FIX: the docstring above previously sat as a no-op string
        # statement in the class body (before the def), so __call__ had no
        # docstring; it now documents the method it describes.
        if timestamp is None:
            timestamp = time.time()
        timestamp = int(timestamp)
        # Accept numeric strings (py2 str/unicode, py3 str) by coercing to float.
        if type(metric_value).__name__ in ['str', 'unicode']:
            metric_value = float(metric_value)
        log.debug("metric: '%s'" % metric_name)
        metric_name = self.clean_metric_name(metric_name)
        log.debug("metric: '%s'" % metric_name)
        message = "%s%s%s %f %d\n" % (self.prefix, metric_name, self.suffix,
                                      metric_value, timestamp)
        # An option to lowercase the entire message
        if self.lowercase_metric_names:
            message = message.lower()
        return message
| {
"content_hash": "36b8dae24e52dd9cedc60108dc0202aa",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 90,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.5986038394415357,
"repo_name": "PabloLefort/graphitesend",
"id": "ec5075940c57103a0053fbd8066665d540d319d7",
"size": "3438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphitesend/formatter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48906"
}
],
"symlink_target": ""
} |
import time
import unittest
from rq import Queue
import frappe
from frappe.core.page.background_jobs.background_jobs import remove_failed_jobs
from frappe.utils.background_jobs import generate_qname, get_redis_conn
class TestBackgroundJobs(unittest.TestCase):
    def test_remove_failed_jobs(self):
        """A failed short-queue job must be purged by remove_failed_jobs()."""
        frappe.enqueue(method="frappe.tests.test_background_jobs.fail_function", queue="short")
        # wait for enqueued job to execute
        time.sleep(2)

        redis_conn = get_redis_conn()
        all_queues = Queue.all(redis_conn)

        for rq_queue in all_queues:
            if rq_queue.name == generate_qname("short"):
                failed_registry = rq_queue.failed_job_registry
                self.assertGreater(failed_registry.count, 0)

        remove_failed_jobs()

        for rq_queue in all_queues:
            if rq_queue.name == generate_qname("short"):
                failed_registry = rq_queue.failed_job_registry
                self.assertEqual(failed_registry.count, 0)

    def test_enqueue_at_front(self):
        """A job enqueued with at_front=True must sit ahead of earlier jobs."""
        kwargs = {
            "method": "frappe.handler.ping",
            "queue": "short",
        }

        # give worker something to work on first so that get_position doesn't return None
        frappe.enqueue(**kwargs)

        # test enqueue with at_front=True
        low_priority_job = frappe.enqueue(**kwargs)
        high_priority_job = frappe.enqueue(**kwargs, at_front=True)

        # lesser is earlier
        self.assertTrue(high_priority_job.get_position() < low_priority_job.get_position())
def fail_function():
    """Helper enqueued by test_remove_failed_jobs; always raises ZeroDivisionError."""
    return 1 / 0
| {
"content_hash": "a37540f8b4db6f413114c8fd02968c36",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 89,
"avg_line_length": 27.632653061224488,
"alnum_prop": 0.7274741506646972,
"repo_name": "yashodhank/frappe",
"id": "6c7dda51f1510e8eeb2d827859a5ae4d934fb5ad",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_background_jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "320627"
},
{
"name": "GCC Machine Description",
"bytes": "2474"
},
{
"name": "HTML",
"bytes": "179539"
},
{
"name": "JavaScript",
"bytes": "1099003"
},
{
"name": "Python",
"bytes": "1430023"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""
Django settings for this project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; fine for a demo project only.
SECRET_KEY = 'y&+f+)tw5sqkcy$@vwh8cy%y^9lwytqtn*y=lv7f9t39b(cufx'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

# Enable this to additionally show the debug toolbar
# INTERNAL_IPS = ['localhost', '127.0.0.1']


# Application definition
# NOTE: ordering matters for several entries below (see inline comments).
INSTALLED_APPS = [
    # djangocms_admin_style needs to be before django.contrib.admin!
    # https://django-cms.readthedocs.org/en/develop/how_to/install.html#configuring-your-project-for-django-cms
    'djangocms_admin_style',

    # django defaults
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',

    # django CMS additions
    'django.contrib.sites',
    'cms',
    'menus',
    'treebeard',
    'sekizai',
    # 'reversion',

    # requirements for django-filer
    'filer',
    'easy_thumbnails',
    'mptt',

    # core addons
    'djangocms_text_ckeditor',
    'djangocms_link',
    'djangocms_picture',
    'djangocms_snippet',
    'djangocms_style',
    'djangocms_googlemap',
    'djangocms_video',
    'djangocms_audio',
]

# NOTE(review): MIDDLEWARE_CLASSES is the pre-1.10 style setting; Django 1.10
# also supports the new MIDDLEWARE setting -- confirm before upgrading Django.
MIDDLEWARE_CLASSES = (
    # its recommended to place this as high as possible to enable apphooks
    # to reload the page without loading unnecessary middlewares
    'cms.middleware.utils.ApphookReloadMiddleware',

    # django defaults
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',

    # django CMS additions
    'django.middleware.locale.LocaleMiddleware',
    'cms.middleware.user.CurrentUserMiddleware',
    'cms.middleware.page.CurrentPageMiddleware',
    'cms.middleware.toolbar.ToolbarMiddleware',
    'cms.middleware.language.LanguageCookieMiddleware',
)

ROOT_URLCONF = 'src.urls'

WSGI_APPLICATION = 'src.wsgi.application'


# Templates
# https://docs.djangoproject.com/en/1.8/ref/settings/#templates
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        # APP_DIRS must stay False because explicit 'loaders' are configured.
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',

                # additional context processors for local development
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',

                # django CMS additions
                'sekizai.context_processors.sekizai',
                'cms.context_processors.cms_settings',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',

                # django CMS additions
                'django.template.loaders.eggs.Loader',
            ],
            'debug': DEBUG,
        },
    },
]


# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# we use os.getenv to be able to override the default database settings for the docker setup
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'

TIME_ZONE = 'Europe/London'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'

# we need to add additional configuration for filer etc.
MEDIA_URL = '/media/'

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)


# django CMS settings
# http://docs.django-cms.org/en/latest/
SITE_ID = 1

CMS_PERMISSION = True

CMS_PLACEHOLDER_CONF = {}

CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER = 'content'


# django CMS internationalization
# http://docs.django-cms.org/en/latest/topics/i18n.html
LANGUAGES = (
    ('en', _('English')),
)


# django CMS templates
# http://docs.django-cms.org/en/latest/how_to/templates.html
CMS_TEMPLATES = (
    ('content.html', 'Content'),
)


# CUSTOM

# Filer
THUMBNAIL_HIGH_RESOLUTION = True

THUMBNAIL_PROCESSORS = (
    'easy_thumbnails.processors.colorspace',
    'easy_thumbnails.processors.autocrop',
    'filer.thumbnail_processors.scale_and_crop_with_subject_location',
    'easy_thumbnails.processors.filters'
)

# CKEditor
# DOCS: https://github.com/divio/djangocms-text-ckeditor
CKEDITOR_SETTINGS = {
    'stylesSet': 'default:/static/js/addons/ckeditor.wysiwyg.js',
    'contentsCss': ['/static/css/base.css'],
}
| {
"content_hash": "da2ffaf639e873ce6c7184a49ec8fd9d",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 111,
"avg_line_length": 27.32758620689655,
"alnum_prop": 0.6782334384858044,
"repo_name": "divio/django-cms-demo",
"id": "719ddded19354479942f2e2896ba5ec0a0cadf84",
"size": "6340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8449"
}
],
"symlink_target": ""
} |
from cStringIO import StringIO
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter, BinaryDecoder, BinaryEncoder
import avro.schema
class AvroFileReader(DataFileReader):
    """Thin DataFileReader wrapper matching pyavroc's file-reader signature."""
    def __init__(self, f, types=False):
        # pyavroc can deserialize records into generated Python types; this
        # pure-Python emulation does not support that mode.
        if types:
            raise RuntimeError('types not supported')
        super(AvroFileReader, self).__init__(f, DatumReader())
class AvroFileWriter(DataFileWriter):
    """DataFileWriter wrapper that parses a JSON schema and exposes write()."""
    def __init__(self, f, schema_json):
        schema = avro.schema.parse(schema_json)
        super(AvroFileWriter, self).__init__(f, DatumWriter(), schema)

    def write(self, datum):
        # pyavroc's writer exposes write(); delegate to DataFileWriter.append().
        return super(AvroFileWriter, self).append(datum)
class AvroDeserializer(object):
    """Deserialize single raw Avro records (no container file) for one schema."""
    def __init__(self, schema_str):
        parsed_schema = avro.schema.parse(schema_str)
        self.reader = DatumReader(parsed_schema)

    def deserialize(self, rec_bytes):
        """Decode one record from its raw byte representation."""
        decoder = BinaryDecoder(StringIO(rec_bytes))
        return self.reader.read(decoder)
class AvroSerializer(object):
    """Serialize single records to raw Avro bytes (no container file) for one schema."""
    def __init__(self, schema_str):
        parsed_schema = avro.schema.parse(schema_str)
        self.writer = DatumWriter(parsed_schema)

    def serialize(self, record):
        """Encode one record and return its raw byte representation."""
        buffer = StringIO()
        self.writer.write(record, BinaryEncoder(buffer))
        return buffer.getvalue()
| {
"content_hash": "25013424d657e4d1b8d7369090ea4488",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 74,
"avg_line_length": 27.67391304347826,
"alnum_prop": 0.6724273369992144,
"repo_name": "simleo/pydoop-features",
"id": "d73ef31d8fee9bc3ebdaa8be5f33ade03ed13f58",
"size": "1947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfeatures/pyavroc_emu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "153"
},
{
"name": "Java",
"bytes": "49986"
},
{
"name": "Python",
"bytes": "367279"
},
{
"name": "Shell",
"bytes": "9643"
}
],
"symlink_target": ""
} |
import os
import numpy as np
from collections import OrderedDict
from md_utils.md_common import (InvalidDataError, warning)
# Constants #
MISSING_ATOMS_MSG = "Could not find lines for atoms ({}) in timestep {} in file: {}"
TSTEP_LINE = 'ITEM: TIMESTEP'
NUM_ATOM_LINE = 'ITEM: NUMBER OF ATOMS'
BOX_LINE = 'ITEM: BOX'
ATOMS_LINE = 'ITEM: ATOMS'
# Logic #
def find_atom_data(lammps_f, atom_ids):
    """Searches and returns the given file location for atom data for the given IDs.

    :param lammps_f: The LAMMPS data file to search.
    :param atom_ids: The set of atom IDs to collect.
    :return: A nested dict of the atoms found keyed first by time step, then by atom ID,
        plus a dict of box dimensions keyed by time step.
    :raises: InvalidDataError If the file is missing atom data or is otherwise malformed.
    """
    tstep_atoms = OrderedDict()
    tstep_box = {}
    atom_count = len(atom_ids)
    # Template meaning "box dimensions not yet read"; always copied before use
    # so per-timestep writes never mutate this shared array.
    empty_dims = np.full(3, np.nan)
    with open(lammps_f) as lfh:
        file_name = os.path.basename(lammps_f)
        tstep_id = None
        box_dim = np.copy(empty_dims)
        tstep_val = "(no value)"
        for line in lfh:
            if line.startswith(TSTEP_LINE):
                try:
                    tstep_val = next(lfh).strip()
                    tstep_id = int(tstep_val)
                # Todo: remove if never used
                except ValueError as e:
                    raise InvalidDataError("Invalid timestep value {}: {}".format(tstep_val, e))
            elif line.startswith(NUM_ATOM_LINE):
                # not needed, so just move along
                next(lfh)
            elif line.startswith(BOX_LINE):
                try:
                    # One line per coordinate: "<lo> <hi>"; store the box length.
                    for coord_id in range(len(box_dim)):
                        box_vals = list(map(float, next(lfh).strip().split()))
                        if len(box_vals) == 2:
                            box_dim[coord_id] = box_vals[1] - box_vals[0]
                except (ValueError, KeyError) as e:
                    raise InvalidDataError("Invalid PBC value read on timestep {}: {}".format(tstep_val, e))
            elif tstep_id is not None:
                atom_lines = find_atom_lines(lfh, atom_ids, tstep_id, file_name)
                if len(atom_lines) != atom_count:
                    try:
                        missing_atoms_err(atom_ids, atom_lines, tstep_id, file_name)
                    except InvalidDataError as e:
                        warning(e)
                        warning("Skipping timestep and continuing.")
                else:
                    tstep_atoms[tstep_id] = atom_lines
                    tstep_box[tstep_id] = box_dim
                tstep_id = None
                # BUG FIX: copy the template rather than aliasing it. The original
                # `box_dim = empty_dims` meant later in-place writes corrupted both
                # the template and the array already stored in tstep_box.
                box_dim = np.copy(empty_dims)
    return tstep_atoms, tstep_box
def find_atom_lines(lfh, atom_ids, tstep_id, file_name):
    """Collects the atom data for the given IDs, returning a dict keyed by atom
    ID with the atom value formatted as a six-element list containing:

    * Molecule ID (int)
    * Atom type (int)
    * Charge (float)
    * X (float)
    * Y (float)
    * Z (float)

    :param lfh: A file handle for a LAMMPS file.
    :param atom_ids: The set of atom IDs to collect.
    :param tstep_id: The ID for the current time step.
    :param file_name: the file name (basename) for the lammps file (for error printing)
    :return: A dict of atom lines keyed by atom ID (int).
    :raises: InvalidDataError If the time step section is missing atom data or
        is otherwise malformed.
    """
    found_atoms = {}
    wanted_count = len(atom_ids)
    for raw_line in lfh:
        fields = raw_line.split()
        if len(fields) == 7 and int(fields[0]) in atom_ids:
            # First three columns are ints (id, mol, type); last four are
            # floats (charge, x, y, z).
            # noinspection PyTypeChecker
            parsed = [int(val) for val in fields[:3]] + [float(val) for val in fields[-4:]]
            found_atoms[parsed[0]] = parsed[1:]
            if len(found_atoms) == wanted_count:
                return found_atoms
        elif raw_line.startswith(TSTEP_LINE):
            # Reached the next timestep before finding every requested atom;
            # this call always raises InvalidDataError.
            missing_atoms_err(atom_ids, found_atoms, tstep_id, file_name)
    return found_atoms
# Exception Creators #
def missing_atoms_err(atom_ids, found_atoms, tstep_id, file_name):
    """Creates and raises an exception when the function is unable to find atom
    data for all of the requested IDs.

    :param atom_ids: The atoms that were requested.
    :param found_atoms: The collection of atoms found.
    :param tstep_id: The time step ID where the atom data was missing.
    :param file_name: the file name with the time step ID where atom was missing.
    :raises: InvalidDataError Describing the missing atom data.
    """
    missing_ids = atom_ids.difference(found_atoms.keys())
    id_csv = ",".join(map(str, missing_ids))
    raise InvalidDataError(MISSING_ATOMS_MSG.format(id_csv, tstep_id, file_name))
| {
"content_hash": "25657851c9e02c86e1e006ea25e1d9c8",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 108,
"avg_line_length": 40.142857142857146,
"alnum_prop": 0.5842579024492359,
"repo_name": "cmayes/md_utils",
"id": "c37c3e0f09afccf6fd167a1e6560dc8fdfbd97a3",
"size": "4792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "md_utils/lammps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1698"
},
{
"name": "Python",
"bytes": "115128"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Smarty",
"bytes": "211"
}
],
"symlink_target": ""
} |
import re
import time
import urllib
# 3p
import pymongo
# project
from checks import AgentCheck
from urlparse import urlsplit
from config import _is_affirmative
from distutils.version import LooseVersion # pylint: disable=E0611,E0401
DEFAULT_TIMEOUT = 30
GAUGE = AgentCheck.gauge
RATE = AgentCheck.rate
class MongoDb(AgentCheck):
"""
MongoDB agent check.
# Metrics
Metric available for collection are listed by topic as `MongoDb` class variables.
Various metric topics are collected by default. Others require the
corresponding option enabled in the check configuration file.
## Format
Metrics are listed with the following format:
```
metric_name -> metric_type
```
or
```
metric_name -> (metric_type, alias)*
```
* `alias` parameter is optional, if unspecified, MongoDB metrics are reported
with their original metric names.
# Service checks
Available service checks:
* `mongodb.can_connect`
Connectivity health to the instance.
* `mongodb.replica_set_member_state`
Disposition of the member replica set state.
"""
# Source
SOURCE_TYPE_NAME = 'mongodb'
# Service check
SERVICE_CHECK_NAME = 'mongodb.can_connect'
# Metrics
"""
Core metrics collected by default.
"""
BASE_METRICS = {
"asserts.msg": RATE,
"asserts.regular": RATE,
"asserts.rollovers": RATE,
"asserts.user": RATE,
"asserts.warning": RATE,
"backgroundFlushing.average_ms": GAUGE,
"backgroundFlushing.flushes": RATE,
"backgroundFlushing.last_ms": GAUGE,
"backgroundFlushing.total_ms": GAUGE,
"connections.available": GAUGE,
"connections.current": GAUGE,
"connections.totalCreated": GAUGE,
"cursors.timedOut": GAUGE,
"cursors.totalOpen": GAUGE,
"extra_info.heap_usage_bytes": RATE,
"extra_info.page_faults": RATE,
"fsyncLocked": GAUGE,
"globalLock.activeClients.readers": GAUGE,
"globalLock.activeClients.total": GAUGE,
"globalLock.activeClients.writers": GAUGE,
"globalLock.currentQueue.readers": GAUGE,
"globalLock.currentQueue.total": GAUGE,
"globalLock.currentQueue.writers": GAUGE,
"globalLock.lockTime": GAUGE,
"globalLock.ratio": GAUGE, # < 2.2
"globalLock.totalTime": GAUGE,
"indexCounters.accesses": RATE,
"indexCounters.btree.accesses": RATE, # < 2.4
"indexCounters.btree.hits": RATE, # < 2.4
"indexCounters.btree.misses": RATE, # < 2.4
"indexCounters.btree.missRatio": GAUGE, # < 2.4
"indexCounters.hits": RATE,
"indexCounters.misses": RATE,
"indexCounters.missRatio": GAUGE,
"indexCounters.resets": RATE,
"mem.bits": GAUGE,
"mem.mapped": GAUGE,
"mem.mappedWithJournal": GAUGE,
"mem.resident": GAUGE,
"mem.virtual": GAUGE,
"metrics.cursor.open.noTimeout": GAUGE,
"metrics.cursor.open.pinned": GAUGE,
"metrics.cursor.open.total": GAUGE,
"metrics.cursor.timedOut": RATE,
"metrics.document.deleted": RATE,
"metrics.document.inserted": RATE,
"metrics.document.returned": RATE,
"metrics.document.updated": RATE,
"metrics.getLastError.wtime.num": RATE,
"metrics.getLastError.wtime.totalMillis": RATE,
"metrics.getLastError.wtimeouts": RATE,
"metrics.operation.fastmod": RATE,
"metrics.operation.idhack": RATE,
"metrics.operation.scanAndOrder": RATE,
"metrics.operation.writeConflicts": RATE,
"metrics.queryExecutor.scanned": RATE,
"metrics.record.moves": RATE,
"metrics.repl.apply.batches.num": RATE,
"metrics.repl.apply.batches.totalMillis": RATE,
"metrics.repl.apply.ops": RATE,
"metrics.repl.buffer.count": GAUGE,
"metrics.repl.buffer.maxSizeBytes": GAUGE,
"metrics.repl.buffer.sizeBytes": GAUGE,
"metrics.repl.network.bytes": RATE,
"metrics.repl.network.getmores.num": RATE,
"metrics.repl.network.getmores.totalMillis": RATE,
"metrics.repl.network.ops": RATE,
"metrics.repl.network.readersCreated": RATE,
"metrics.repl.oplog.insert.num": RATE,
"metrics.repl.oplog.insert.totalMillis": RATE,
"metrics.repl.oplog.insertBytes": RATE,
"metrics.repl.preload.docs.num": RATE,
"metrics.repl.preload.docs.totalMillis": RATE,
"metrics.repl.preload.indexes.num": RATE,
"metrics.repl.preload.indexes.totalMillis": RATE,
"metrics.repl.storage.freelist.search.bucketExhausted": RATE,
"metrics.repl.storage.freelist.search.requests": RATE,
"metrics.repl.storage.freelist.search.scanned": RATE,
"metrics.ttl.deletedDocuments": RATE,
"metrics.ttl.passes": RATE,
"network.bytesIn": RATE,
"network.bytesOut": RATE,
"network.numRequests": RATE,
"opcounters.command": RATE,
"opcounters.delete": RATE,
"opcounters.getmore": RATE,
"opcounters.insert": RATE,
"opcounters.query": RATE,
"opcounters.update": RATE,
"opcountersRepl.command": RATE,
"opcountersRepl.delete": RATE,
"opcountersRepl.getmore": RATE,
"opcountersRepl.insert": RATE,
"opcountersRepl.query": RATE,
"opcountersRepl.update": RATE,
"oplog.logSizeMB": GAUGE,
"oplog.usedSizeMB": GAUGE,
"oplog.timeDiff": GAUGE,
"replSet.health": GAUGE,
"replSet.replicationLag": GAUGE,
"replSet.state": GAUGE,
"replSet.votes": GAUGE,
"replSet.voteFraction": GAUGE,
"stats.avgObjSize": GAUGE,
"stats.collections": GAUGE,
"stats.dataSize": GAUGE,
"stats.fileSize": GAUGE,
"stats.indexes": GAUGE,
"stats.indexSize": GAUGE,
"stats.nsSizeMB": GAUGE,
"stats.numExtents": GAUGE,
"stats.objects": GAUGE,
"stats.storageSize": GAUGE,
"uptime": GAUGE,
}
"""
Journaling-related operations and performance report.
https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.dur
"""
DURABILITY_METRICS = {
"dur.commits": GAUGE,
"dur.commitsInWriteLock": GAUGE,
"dur.compression": GAUGE,
"dur.earlyCommits": GAUGE,
"dur.journaledMB": GAUGE,
"dur.timeMs.dt": GAUGE,
"dur.timeMs.prepLogBuffer": GAUGE,
"dur.timeMs.remapPrivateView": GAUGE,
"dur.timeMs.writeToDataFiles": GAUGE,
"dur.timeMs.writeToJournal": GAUGE,
"dur.writeToDataFilesMB": GAUGE,
# Required version > 3.0.0
"dur.timeMs.commits": GAUGE,
"dur.timeMs.commitsInWriteLock": GAUGE,
}
"""
ServerStatus use of database commands report.
Required version > 3.0.0.
https://docs.mongodb.org/manual/reference/command/serverStatus/#serverStatus.metrics.commands
"""
COMMANDS_METRICS = {
# Required version >
"metrics.commands.count.failed": RATE,
"metrics.commands.count.total": GAUGE,
"metrics.commands.createIndexes.failed": RATE,
"metrics.commands.createIndexes.total": GAUGE,
"metrics.commands.delete.failed": RATE,
"metrics.commands.delete.total": GAUGE,
"metrics.commands.eval.failed": RATE,
"metrics.commands.eval.total": GAUGE,
"metrics.commands.findAndModify.failed": RATE,
"metrics.commands.findAndModify.total": GAUGE,
"metrics.commands.insert.failed": RATE,
"metrics.commands.insert.total": GAUGE,
"metrics.commands.update.failed": RATE,
"metrics.commands.update.total": GAUGE,
}
"""
ServerStatus locks report.
Required version > 3.0.0.
https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
"""
LOCKS_METRICS = {
"locks.Collection.acquireCount.R": RATE,
"locks.Collection.acquireCount.r": RATE,
"locks.Collection.acquireCount.W": RATE,
"locks.Collection.acquireCount.w": RATE,
"locks.Collection.acquireWaitCount.R": RATE,
"locks.Collection.acquireWaitCount.W": RATE,
"locks.Collection.timeAcquiringMicros.R": RATE,
"locks.Collection.timeAcquiringMicros.W": RATE,
"locks.Database.acquireCount.r": RATE,
"locks.Database.acquireCount.R": RATE,
"locks.Database.acquireCount.w": RATE,
"locks.Database.acquireCount.W": RATE,
"locks.Database.acquireWaitCount.r": RATE,
"locks.Database.acquireWaitCount.R": RATE,
"locks.Database.acquireWaitCount.w": RATE,
"locks.Database.acquireWaitCount.W": RATE,
"locks.Database.timeAcquiringMicros.r": RATE,
"locks.Database.timeAcquiringMicros.R": RATE,
"locks.Database.timeAcquiringMicros.w": RATE,
"locks.Database.timeAcquiringMicros.W": RATE,
"locks.Global.acquireCount.r": RATE,
"locks.Global.acquireCount.R": RATE,
"locks.Global.acquireCount.w": RATE,
"locks.Global.acquireCount.W": RATE,
"locks.Global.acquireWaitCount.r": RATE,
"locks.Global.acquireWaitCount.R": RATE,
"locks.Global.acquireWaitCount.w": RATE,
"locks.Global.acquireWaitCount.W": RATE,
"locks.Global.timeAcquiringMicros.r": RATE,
"locks.Global.timeAcquiringMicros.R": RATE,
"locks.Global.timeAcquiringMicros.w": RATE,
"locks.Global.timeAcquiringMicros.W": RATE,
"locks.Metadata.acquireCount.R": RATE,
"locks.Metadata.acquireCount.W": RATE,
"locks.MMAPV1Journal.acquireCount.r": RATE,
"locks.MMAPV1Journal.acquireCount.w": RATE,
"locks.MMAPV1Journal.acquireWaitCount.r": RATE,
"locks.MMAPV1Journal.acquireWaitCount.w": RATE,
"locks.MMAPV1Journal.timeAcquiringMicros.r": RATE,
"locks.MMAPV1Journal.timeAcquiringMicros.w": RATE,
"locks.oplog.acquireCount.R": RATE,
"locks.oplog.acquireCount.w": RATE,
"locks.oplog.acquireWaitCount.R": RATE,
"locks.oplog.acquireWaitCount.w": RATE,
"locks.oplog.timeAcquiringMicros.R": RATE,
"locks.oplog.timeAcquiringMicros.w": RATE,
}
"""
TCMalloc memory allocator report.
"""
TCMALLOC_METRICS = {
"tcmalloc.generic.current_allocated_bytes": GAUGE,
"tcmalloc.generic.heap_size": GAUGE,
"tcmalloc.tcmalloc.aggressive_memory_decommit": GAUGE,
"tcmalloc.tcmalloc.central_cache_free_bytes": GAUGE,
"tcmalloc.tcmalloc.current_total_thread_cache_bytes": GAUGE,
"tcmalloc.tcmalloc.max_total_thread_cache_bytes": GAUGE,
"tcmalloc.tcmalloc.pageheap_free_bytes": GAUGE,
"tcmalloc.tcmalloc.pageheap_unmapped_bytes": GAUGE,
"tcmalloc.tcmalloc.thread_cache_free_bytes": GAUGE,
"tcmalloc.tcmalloc.transfer_cache_free_bytes": GAUGE,
}
"""
WiredTiger storage engine.
"""
WIREDTIGER_METRICS = {
"wiredTiger.cache.bytes currently in the cache": (GAUGE, "wiredTiger.cache.bytes_currently_in_cache"), # noqa
"wiredTiger.cache.failed eviction of pages that exceeded the in-memory maximum": (RATE, "wiredTiger.cache.failed_eviction_of_pages_exceeding_the_in-memory_maximum"), # noqa
"wiredTiger.cache.in-memory page splits": GAUGE,
"wiredTiger.cache.maximum bytes configured": GAUGE,
"wiredTiger.cache.maximum page size at eviction": GAUGE,
"wiredTiger.cache.modified pages evicted": GAUGE,
"wiredTiger.cache.pages read into cache": GAUGE, # noqa
"wiredTiger.cache.pages written from cache": GAUGE, # noqa
"wiredTiger.cache.pages currently held in the cache": (GAUGE, "wiredTiger.cache.pages_currently_held_in_cache"), # noqa
"wiredTiger.cache.pages evicted because they exceeded the in-memory maximum": (RATE, "wiredTiger.cache.pages_evicted_exceeding_the_in-memory_maximum"), # noqa
"wiredTiger.cache.pages evicted by application threads": RATE,
"wiredTiger.cache.tracked dirty bytes in the cache": (GAUGE, "wiredTiger.cache.tracked_dirty_bytes_in_cache"), # noqa
"wiredTiger.cache.unmodified pages evicted": GAUGE,
"wiredTiger.concurrentTransactions.read.available": GAUGE,
"wiredTiger.concurrentTransactions.read.out": GAUGE,
"wiredTiger.concurrentTransactions.read.totalTickets": GAUGE,
"wiredTiger.concurrentTransactions.write.available": GAUGE,
"wiredTiger.concurrentTransactions.write.out": GAUGE,
"wiredTiger.concurrentTransactions.write.totalTickets": GAUGE,
}
"""
Usage statistics for each collection.
https://docs.mongodb.org/v3.0/reference/command/top/
"""
TOP_METRICS = {
"commands.count": GAUGE,
"commands.time": GAUGE,
"getmore.count": GAUGE,
"getmore.time": GAUGE,
"insert.count": GAUGE,
"insert.time": GAUGE,
"queries.count": GAUGE,
"queries.time": GAUGE,
"readLock.count": GAUGE,
"readLock.time": GAUGE,
"remove.count": GAUGE,
"remove.time": GAUGE,
"total.count": GAUGE,
"total.time": GAUGE,
"update.count": GAUGE,
"update.time": GAUGE,
"writeLock.count": GAUGE,
"writeLock.time": GAUGE,
}
COLLECTION_METRICS = {
'collection.size': GAUGE,
'collection.avgObjSize': GAUGE,
'collection.count': GAUGE,
'collection.capped': GAUGE,
'collection.max': GAUGE,
'collection.maxSize': GAUGE,
'collection.storageSize': GAUGE,
'collection.nindexes': GAUGE,
'collection.indexSizes': GAUGE,
}
"""
Mapping for case-sensitive metric name suffixes.
https://docs.mongodb.org/manual/reference/command/serverStatus/#server-status-locks
"""
CASE_SENSITIVE_METRIC_NAME_SUFFIXES = {
'\.R\\b': ".shared",
'\.r\\b': ".intent_shared",
'\.W\\b': ".exclusive",
'\.w\\b': ".intent_exclusive",
}
"""
Metrics collected by default.
"""
DEFAULT_METRICS = {
'base': BASE_METRICS,
'durability': DURABILITY_METRICS,
'locks': LOCKS_METRICS,
'wiredtiger': WIREDTIGER_METRICS,
}
"""
Additional metrics by category.
"""
AVAILABLE_METRICS = {
'metrics.commands': COMMANDS_METRICS,
'tcmalloc': TCMALLOC_METRICS,
'top': TOP_METRICS,
'collection': COLLECTION_METRICS,
}
# Replication states
"""
MongoDB replica set states, as documented at
https://docs.mongodb.org/manual/reference/replica-states/
"""
REPLSET_MEMBER_STATES = {
0: ('STARTUP', 'Starting Up'),
1: ('PRIMARY', 'Primary'),
2: ('SECONDARY', 'Secondary'),
3: ('RECOVERING', 'Recovering'),
4: ('Fatal', 'Fatal'), # MongoDB docs don't list this state
5: ('STARTUP2', 'Starting up (forking threads)'),
6: ('UNKNOWN', 'Unknown to this replset member'),
7: ('ARBITER', 'Arbiter'),
8: ('DOWN', 'Down'),
9: ('ROLLBACK', 'Rollback'),
10: ('REMOVED', 'Removed'),
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Members' last replica set states
self._last_state_by_server = {}
# List of metrics to collect per instance
self.metrics_to_collect_by_instance = {}
self.collection_metrics_names = []
for (key, value) in self.COLLECTION_METRICS.iteritems():
self.collection_metrics_names.append(key.split('.')[1])
    def get_library_versions(self):
        """Report the pymongo client version for agent diagnostics."""
        return {"pymongo": pymongo.version}
def get_state_description(self, state):
if state in self.REPLSET_MEMBER_STATES:
return self.REPLSET_MEMBER_STATES[state][1]
else:
return 'Replset state %d is unknown to the Datadog agent' % state
def get_state_name(self, state):
if state in self.REPLSET_MEMBER_STATES:
return self.REPLSET_MEMBER_STATES[state][0]
else:
return 'UNKNOWN'
def _report_replica_set_state(self, state, clean_server_name, replset_name, agentConfig):
"""
Report the member's replica set state
* Submit a service check.
* Create an event on state change.
"""
last_state = self._last_state_by_server.get(clean_server_name, -1)
self._last_state_by_server[clean_server_name] = state
if last_state != state and last_state != -1:
return self.create_event(last_state, state, clean_server_name, replset_name, agentConfig)
def hostname_for_event(self, clean_server_name, agentConfig):
"""Return a reasonable hostname for a replset membership event to mention."""
uri = urlsplit(clean_server_name)
if '@' in uri.netloc:
hostname = uri.netloc.split('@')[1].split(':')[0]
else:
hostname = uri.netloc.split(':')[0]
if hostname == 'localhost':
hostname = self.hostname
return hostname
    def create_event(self, last_state, state, clean_server_name, replset_name, agentConfig):
        """Create an event with a message describing the replication
        state of a mongo node

        :param last_state: previous replset member state code (int).
        :param state: new replset member state code (int).
        :param clean_server_name: server URI with the password scrubbed.
        :param replset_name: name of the replica set.
        :param agentConfig: agent config, forwarded to hostname_for_event.
        """
        # Resolve the human-readable and short names for both states.
        status = self.get_state_description(state)
        short_status = self.get_state_name(state)
        last_short_status = self.get_state_name(last_state)
        hostname = self.hostname_for_event(clean_server_name, agentConfig)
        msg_title = "%s is %s for %s" % (hostname, short_status, replset_name)
        msg = "MongoDB %s (%s) just reported as %s (%s) for %s; it was %s before." % (hostname, clean_server_name, status, short_status, replset_name, last_short_status)
        # Emit the Datadog event describing the state transition.
        self.event({
            'timestamp': int(time.time()),
            'source_type_name': self.SOURCE_TYPE_NAME,
            'msg_title': msg_title,
            'msg_text': msg,
            'host': hostname,
            'tags': [
                'action:mongo_replset_member_status_change',
                'member_status:' + short_status,
                'previous_member_status:' + last_short_status,
                'replset:' + replset_name,
            ]
        })
def _build_metric_list_to_collect(self, additional_metrics):
"""
Build the metric list to collect based on the instance preferences.
"""
metrics_to_collect = {}
# Defaut metrics
for default_metrics in self.DEFAULT_METRICS.itervalues():
metrics_to_collect.update(default_metrics)
# Additional metrics metrics
for option in additional_metrics:
additional_metrics = self.AVAILABLE_METRICS.get(option)
if not additional_metrics:
if option in self.DEFAULT_METRICS:
self.log.warning(
u"`%s` option is deprecated."
u" The corresponding metrics are collected by default.", option
)
else:
self.log.warning(
u"Failed to extend the list of metrics to collect:"
u" unrecognized `%s` option", option
)
continue
self.log.debug(
u"Adding `%s` corresponding metrics to the list"
u" of metrics to collect.", option
)
metrics_to_collect.update(additional_metrics)
return metrics_to_collect
def _get_metrics_to_collect(self, instance_key, additional_metrics):
"""
Return and cache the list of metrics to collect.
"""
if instance_key not in self.metrics_to_collect_by_instance:
self.metrics_to_collect_by_instance[instance_key] = \
self._build_metric_list_to_collect(additional_metrics)
return self.metrics_to_collect_by_instance[instance_key]
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
"""
Return the submit method and the metric name to use.
The metric name is defined as follow:
* If available, the normalized metric name alias
* (Or) the normalized original metric name
"""
submit_method = metrics_to_collect[original_metric_name][0] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else metrics_to_collect[original_metric_name]
metric_name = metrics_to_collect[original_metric_name][1] \
if isinstance(metrics_to_collect[original_metric_name], tuple) \
else original_metric_name
return submit_method, self._normalize(metric_name, submit_method, prefix)
def _normalize(self, metric_name, submit_method, prefix):
"""
Replace case-sensitive metric name characters, normalize the metric name,
prefix and suffix according to its type.
"""
metric_prefix = "mongodb." if not prefix else "mongodb.{0}.".format(prefix)
metric_suffix = "ps" if submit_method == RATE else ""
# Replace case-sensitive metric name characters
for pattern, repl in self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES.iteritems():
metric_name = re.compile(pattern).sub(repl, metric_name)
# Normalize, and wrap
return u"{metric_prefix}{normalized_metric_name}{metric_suffix}".format(
normalized_metric_name=self.normalize(metric_name.lower()),
metric_prefix=metric_prefix, metric_suffix=metric_suffix
)
    def _authenticate(self, database, username, password, use_x509, server_name, service_check_tags):
        """
        Authenticate to the database.

        Available mechanisms:
        * Username & password
        * X.509

        More information:
        https://api.mongodb.com/python/current/examples/authentication.html

        :param database: pymongo database object to authenticate against.
        :param username: user name for either mechanism.
        :param password: password (ignored for X.509).
        :param use_x509: when truthy, use the MONGODB-X509 mechanism.
        :param server_name: sanitized server name, used in error messages only.
        :param service_check_tags: tags for the CRITICAL service check on failure.
        :return: True when authentication succeeded.
        :raises Exception: when authentication failed (after submitting a
            CRITICAL service check).
        """
        authenticated = False
        try:
            # X.509
            if use_x509:
                self.log.debug(
                    u"Authenticate `%s` to `%s` using `MONGODB-X509` mechanism",
                    username, database
                )
                authenticated = database.authenticate(username, mechanism='MONGODB-X509')
            # Username & password
            else:
                authenticated = database.authenticate(username, password)
        except pymongo.errors.PyMongoError as e:
            # Log only; the failure is surfaced via the service check below.
            self.log.error(
                u"Authentication failed due to invalid credentials or configuration issues. %s", e
            )
        if not authenticated:
            message = ("Mongo: cannot connect with config %s" % server_name)
            self.service_check(
                self.SERVICE_CHECK_NAME,
                AgentCheck.CRITICAL,
                tags=service_check_tags,
                message=message)
            raise Exception(message)
        return authenticated
    def _parse_uri(self, server, sanitize_username=False):
        """
        Parses a MongoDB-formatted URI (e.g. mongodb://user:pass@server/db) and returns parsed elements
        and a sanitized URI.

        :param server: raw MongoDB connection URI.
        :param sanitize_username: when True, also strip the username from the
            sanitized URI (used when authenticating via X.509).
        :return: (username, password, db_name, nodelist, clean_server_name,
            auth_source) tuple; elements may be None when absent from the URI.
        """
        parsed = pymongo.uri_parser.parse_uri(server)
        username = parsed.get('username')
        password = parsed.get('password')
        db_name = parsed.get('database')
        nodelist = parsed.get('nodelist')
        auth_source = parsed.get('options', {}).get('authsource')

        # Remove password (and optionally username) from sanitized server URI.
        # To ensure that the `replace` works well, we first need to url-decode the raw server string
        # since the password parsed by pymongo is url-decoded
        # NOTE(review): replace() masks every occurrence of the password
        # substring, which could over-redact if it appears elsewhere in the URI.
        decoded_server = urllib.unquote_plus(server)
        clean_server_name = decoded_server.replace(password, "*" * 5) if password else decoded_server

        if sanitize_username and username:
            username_pattern = u"{}[@:]".format(re.escape(username))
            clean_server_name = re.sub(username_pattern, "", clean_server_name)

        return username, password, db_name, nodelist, clean_server_name, auth_source
def _collect_indexes_stats(self, instance, db, tags):
"""
Collect indexes statistics for all collections in the configuration.
This use the "$indexStats" command.
"""
for coll_name in instance.get('collections', []):
try:
for stats in db[coll_name].aggregate([{"$indexStats": {}}], cursor={}):
idx_tags = tags + [
"name:{0}".format(stats.get('name', 'unknown')),
"collection:{0}".format(coll_name),
]
self.gauge('mongodb.collection.indexes.accesses.ops', int(stats.get('accesses', {}).get('ops', 0)), idx_tags)
except Exception as e:
self.log.error("Could not fetch indexes stats for collection %s: %s", coll_name, e)
def check(self, instance):
"""
Returns a dictionary that looks a lot like what's sent back by
db.serverStatus()
"""
def total_seconds(td):
"""
Returns total seconds of a timedelta in a way that's safe for
Python < 2.7
"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return (
lag.microseconds +
(lag.seconds + lag.days * 24 * 3600) * 10**6
) / 10.0**6
if 'server' not in instance:
raise Exception("Missing 'server' in mongo config")
# x.509 authentication
ssl_params = {
'ssl': instance.get('ssl', None),
'ssl_keyfile': instance.get('ssl_keyfile', None),
'ssl_certfile': instance.get('ssl_certfile', None),
'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
'ssl_ca_certs': instance.get('ssl_ca_certs', None)
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
server = instance['server']
username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))
additional_metrics = instance.get('additional_metrics', [])
# Get the list of metrics to collect
collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics
metrics_to_collect = self._get_metrics_to_collect(
server,
additional_metrics
)
# Tagging
tags = instance.get('tags', [])
# ...de-dupe tags to avoid a memory leak
tags = list(set(tags))
if not db_name:
self.log.info('No MongoDB database found in URI. Defaulting to admin.')
db_name = 'admin'
service_check_tags = [
"db:%s" % db_name
]
service_check_tags.extend(tags)
# ...add the `server` tag to the metrics' tags only
# (it's added in the backend for service checks)
tags.append('server:%s' % clean_server_name)
if nodelist:
host = nodelist[0][0]
port = nodelist[0][1]
service_check_tags = service_check_tags + [
"host:%s" % host,
"port:%s" % port
]
timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000
try:
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
connectTimeoutMS=timeout,
serverSelectionTimeoutMS=timeout,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,
**ssl_params)
# some commands can only go against the admin DB
admindb = cli['admin']
db = cli[db_name]
except Exception:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags)
raise
# Authenticate
do_auth = True
use_x509 = ssl_params and not password
if not username:
self.log.debug(
u"A username is required to authenticate to `%s`", server
)
do_auth = False
if do_auth:
if auth_source:
self.log.info("authSource was specified in the the server URL: using '%s' as the authentication database", auth_source)
self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)
else:
self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)
try:
status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)
except Exception:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags)
raise
else:
self.service_check(
self.SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags)
if status['ok'] == 0:
raise Exception(status['errmsg'].__str__())
ops = db.current_op()
status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0
status['stats'] = db.command('dbstats')
dbstats = {}
dbstats[db_name] = {'stats': status['stats']}
# Handle replica data, if any
# See
# http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa
try:
data = {}
dbnames = []
replSet = admindb.command('replSetGetStatus')
if replSet:
primary = None
current = None
# need a new connection to deal with replica sets
setname = replSet.get('set')
cli_rs = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
connectTimeoutMS=timeout,
serverSelectionTimeoutMS=timeout,
replicaset=setname,
read_preference=pymongo.ReadPreference.NEAREST,
**ssl_params)
if do_auth:
if auth_source:
self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)
else:
self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)
# Replication set information
replset_name = replSet['set']
replset_state = self.get_state_name(replSet['myState']).lower()
tags.extend([
u"replset_name:{0}".format(replset_name),
u"replset_state:{0}".format(replset_state),
])
# Find nodes: master and current node (ourself)
for member in replSet.get('members'):
if member.get('self'):
current = member
if int(member.get('state')) == 1:
primary = member
# Compute a lag time
if current is not None and primary is not None:
if 'optimeDate' in primary and 'optimeDate' in current:
lag = primary['optimeDate'] - current['optimeDate']
data['replicationLag'] = total_seconds(lag)
if current is not None:
data['health'] = current['health']
data['state'] = replSet['myState']
if current is not None:
total = 0.0
cfg = cli_rs['local']['system.replset'].find_one()
for member in cfg.get('members'):
total += member.get('votes', 1)
if member['_id'] == current['_id']:
data['votes'] = member.get('votes', 1)
data['voteFraction'] = data['votes'] / total
status['replSet'] = data
# Submit events
self._report_replica_set_state(
data['state'], clean_server_name, replset_name, self.agentConfig
)
except Exception as e:
if "OperationFailure" in repr(e) and "replSetGetStatus" in str(e):
pass
else:
raise e
# If these keys exist, remove them for now as they cannot be serialized
try:
status['backgroundFlushing'].pop('last_finished')
except KeyError:
pass
try:
status.pop('localTime')
except KeyError:
pass
dbnames = cli.database_names()
self.gauge('mongodb.dbs', len(dbnames), tags=tags)
for db_n in dbnames:
db_aux = cli[db_n]
dbstats[db_n] = {'stats': db_aux.command('dbstats')}
# Go through the metrics and save the values
for metric_name in metrics_to_collect:
# each metric is of the form: x.y.z with z optional
# and can be found at status[x][y][z]
value = status
if metric_name.startswith('stats'):
continue
else:
try:
for c in metric_name.split("."):
value = value[c]
except KeyError:
continue
# value is now status[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(metric_name, type(value)))
# Submit the metric
submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
submit_method(self, metric_name_alias, value, tags=tags)
for st, value in dbstats.iteritems():
for metric_name in metrics_to_collect:
if not metric_name.startswith('stats.'):
continue
try:
val = value['stats'][metric_name.split('.')[1]]
except KeyError:
continue
# value is now status[x][y][z]
if not isinstance(val, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(metric_name, type(val))
)
# Submit the metric
metrics_tags = (
tags +
[
u"cluster:db:{0}".format(st), # FIXME 6.0 - keep for backward compatibility
u"db:{0}".format(st),
]
)
submit_method, metric_name_alias = \
self._resolve_metric(metric_name, metrics_to_collect)
submit_method(self, metric_name_alias, val, tags=metrics_tags)
if _is_affirmative(instance.get('collections_indexes_stats')):
mongo_version = cli.server_info().get('version', '0.0')
if LooseVersion(mongo_version) >= LooseVersion("3.2"):
self._collect_indexes_stats(instance, db, tags)
else:
self.log.error("'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s", mongo_version)
# Report the usage metrics for dbs/collections
if 'top' in additional_metrics:
try:
dbtop = db.command('top')
for ns, ns_metrics in dbtop['totals'].iteritems():
if "." not in ns:
continue
# configure tags for db name and collection name
dbname, collname = ns.split(".", 1)
ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname]
# iterate over DBTOP metrics
for m in self.TOP_METRICS:
# each metric is of the form: x.y.z with z optional
# and can be found at ns_metrics[x][y][z]
value = ns_metrics
try:
for c in m.split("."):
value = value[c]
except Exception:
continue
# value is now status[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead."
.format(m, type(value))
)
# Submit the metric
submit_method, metric_name_alias = \
self._resolve_metric(m, metrics_to_collect, prefix="usage")
submit_method(self, metric_name_alias, value, tags=ns_tags)
except Exception as e:
self.log.warning('Failed to record `top` metrics %s' % str(e))
if 'local' in dbnames: # it might not be if we are connectiing through mongos
# Fetch information analogous to Mongo's db.getReplicationInfo()
localdb = cli['local']
oplog_data = {}
for ol_collection_name in ("oplog.rs", "oplog.$main"):
ol_options = localdb[ol_collection_name].options()
if ol_options:
break
if ol_options:
try:
oplog_data['logSizeMB'] = round(
ol_options['size'] / 2.0 ** 20, 2
)
oplog = localdb[ol_collection_name]
oplog_data['usedSizeMB'] = round(
localdb.command("collstats", ol_collection_name)['size'] / 2.0 ** 20, 2
)
op_asc_cursor = oplog.find().sort("$natural", pymongo.ASCENDING).limit(1)
op_dsc_cursor = oplog.find().sort("$natural", pymongo.DESCENDING).limit(1)
try:
first_timestamp = op_asc_cursor[0]['ts'].as_datetime()
last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()
oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)
except (IndexError, KeyError):
# if the oplog collection doesn't have any entries
# if an object in the collection doesn't have a ts value, we ignore it
pass
except KeyError:
# encountered an error trying to access options.size for the oplog collection
self.log.warning(u"Failed to record `ReplicationInfo` metrics.")
for (m, value) in oplog_data.iteritems():
submit_method, metric_name_alias = \
self._resolve_metric('oplog.%s' % m, metrics_to_collect)
submit_method(self, metric_name_alias, value, tags=tags)
else:
self.log.debug('"local" database not in dbnames. Not collecting ReplicationInfo metrics')
# get collection level stats
try:
# Ensure that you're on the right db
db = cli[db_name]
# grab the collections from the configutation
coll_names = instance.get('collections', [])
# loop through the collections
for coll_name in coll_names:
# grab the stats from the collection
stats = db.command("collstats", coll_name)
# loop through the metrics
for m in self.collection_metrics_names:
coll_tags = tags + ["db:%s" % db_name, "collection:%s" % coll_name]
value = stats.get(m, None)
if not value:
continue
# if it's the index sizes, then it's a dict.
if m == 'indexSizes':
submit_method, metric_name_alias = \
self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)
# loop through the indexes
for (idx, val) in value.iteritems():
# we tag the index
idx_tags = coll_tags + ["index:%s" % idx]
submit_method(self, metric_name_alias, val, tags=idx_tags)
else:
submit_method, metric_name_alias = \
self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)
submit_method(self, metric_name_alias, value, tags=coll_tags)
except Exception as e:
self.log.warning(u"Failed to record `collection` metrics.")
self.log.exception(e)
| {
"content_hash": "16c5c0832871ca4b85688cafd6db0739",
"timestamp": "",
"source": "github",
"line_count": 1049,
"max_line_length": 181,
"avg_line_length": 39.74261201143947,
"alnum_prop": 0.5661069800911489,
"repo_name": "s-maj/integrations-core",
"id": "ce4b02bdc43c58ed344c55925ef3dec8c4596de8",
"size": "41699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongo/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "15429"
},
{
"name": "Go",
"bytes": "1471"
},
{
"name": "PLSQL",
"bytes": "28501"
},
{
"name": "Perl",
"bytes": "5845"
},
{
"name": "Python",
"bytes": "1758957"
},
{
"name": "Ruby",
"bytes": "177489"
},
{
"name": "Shell",
"bytes": "11831"
}
],
"symlink_target": ""
} |
import os
import atexit
import string
import importlib
import threading
import socket
from time import sleep
def BYTE(message):
    """Render *message* as a UTF-8 encoded IRC line terminated with CRLF."""
    return ("%s\r\n" % message).encode("UTF-8")
class UserInput(threading.Thread):
    """Console input loop for the IRC bot.

    Runs on a daemon thread: reads lines from stdin and either executes a
    bot command (/reload, /quit, /join, /leave, /help) or relays the text
    as a PRIVMSG to the bot's currently focused channel.

    Fixes over the previous revision:
    - ``except WindowsError`` raised NameError on non-Windows Python 3
      (WindowsError only exists on Windows, where it is an alias of
      OSError) -- now catches OSError on every platform.
    - ``socket.close()`` takes no arguments, so the old
      ``close(socket.SHUT_RDWR)`` raised TypeError -- now shuts the
      socket down properly before closing.
    - ``setDaemon(True)`` is deprecated in favour of the ``daemon``
      attribute.
    """

    isRunning = False
    parent = None  # the owning bot instance

    def __init__(self, bot):
        super().__init__()
        self.parent = bot
        # Daemon thread: must not keep the process alive at shutdown.
        self.daemon = True
        self.isRunning = False
        self.start()

    def createMessage(self, message):
        """Join a list of words back into one space-separated string."""
        return " ".join(message)

    def run(self):
        """Read and dispatch console commands until /quit or shutdown."""
        self.isRunning = True
        while self.isRunning:
            try:
                message = input()
                message = message.split(" ")
                if message[0] != "":
                    if message[0] == "/r" or message[0] == "/reload":
                        self.parent.reloadAll()
                    elif message[0] == "/q" or message[0] == "/quit":
                        print("Quitting.")
                        self.parent.quit()
                        self.isRunning = False
                    elif message[0] == "/j" or message[0] == "/join":
                        if len(message) != 2:
                            print("Incorrect usage.")
                        else:
                            self.parent.switch(message[1])
                    elif message[0] == "/l" or message[0] == "/leave":
                        if len(message) >= 2:
                            if len(message) > 2:
                                # Leave every listed channel, then refocus.
                                for i in range(1, len(message)):
                                    self.parent.leave(message[i], False)
                                if len(self.parent.channels) > 0:
                                    self.parent.focusedChannel = self.parent.channels[0]
                                    print("Left channels. Focusing on %s" % self.parent.focusedChannel)
                                else:
                                    print("No channels left.")
                            else:
                                self.parent.leave(message[1], False)
                                if len(self.parent.channels) > 0:
                                    self.parent.focusedChannel = self.parent.channels[0]
                                    print("Left %s. Focusing on %s" % (message[1], self.parent.focusedChannel))
                                else:
                                    print("No channels left.")
                        else:
                            print("Incorrect usage.")
                    elif message[0] == "/?" or message[0] == "/help":
                        print("1. Type anything to chat with others in %s." % self.parent.focusedChannel)
                        print("2. /? or /help -- Bring up the bot commands.")
                        print("3. /j or /join -- Join a new channel. Channel focus will switch over.")
                        print("4. /l or /leave -- Leave channel. Channel focus will change.")
                        print("5. /r or /reload -- Reload all plugins. (Hotswapping is supported.)")
                        print("6. /q or /quit -- Quit the bot.")
                    else:
                        # Not a command: relay as chat to the focused channel.
                        self.parent.s.send(BYTE("PRIVMSG %s :%s" % (self.parent.focusedChannel, self.createMessage(message))))
            except OSError as connError:
                # Connection-level failure: tear the socket down and reconnect.
                print(connError)
                if self.parent.s is not None:
                    try:
                        self.parent.s.shutdown(socket.SHUT_RDWR)
                    except OSError:
                        pass  # already disconnected
                    self.parent.s.close()
                    self.parent.s = None
                self.parent.connect()
            except Exception as error:
                print(error)
| {
"content_hash": "0a26a878afc80d2123f578af0d6f49ca",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 108,
"avg_line_length": 32.08139534883721,
"alnum_prop": 0.5980427691192461,
"repo_name": "tommai78101/IRCBot",
"id": "33e692c0e4fcc91d8bacedd29fb7d98e89715ff8",
"size": "2759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UserInput.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82053"
}
],
"symlink_target": ""
} |
from config import getRootDir
# DEBUG
# Keep DEBUG off outside local development; TEMPLATE_DEBUG follows it.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# NOTE(review): '*' accepts any Host header -- fine for a demo, but this
# should be narrowed to real hostnames in production.
ALLOWED_HOSTS = ['*']
# Database settings; MySQL is used here.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-CN'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = getRootDir() + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): committing a hard-coded SECRET_KEY to source control is a
# security risk; prefer loading it from the environment or an untracked file.
#SECRET_KEY = '^@*6zx%t3fv$jvm972#-+#pv(hmd#cle8(kysod(%*-=!oy7)&'
SECRET_KEY = 'o&7tq(q&bp!5l#y6!#x%(z6qs^03o^3)_)y)5awif1$7w8-@*2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'apidemo.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'apidemo.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    #'/home/liufh/workspace/helios/apidemo/template'
    getRootDir() + '/template'
#     current_path = os.path.dirname(__file__)
#     os.path.join(os.path.dirname(__file__), 'template').replace('\\','/'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
#    'apidemo.center.console.context_processors.menu',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
#     'django.contrib.sites',
#     'django.contrib.messages',
#     'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'videos',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Errors are also appended to this file alongside the project root.
LOG_FILE = '%s/apidemolog.txt' % getRootDir()
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(asctime)s %(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'file': {
            'level': 'ERROR',
            'class': 'logging.FileHandler',
            'formatter': 'simple',
            'filename': LOG_FILE,
        },
    },
    'loggers': {
        'apidemo': {
            'handlers': ['file'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
} | {
"content_hash": "9c7a940cb4659ba7477c0a7bc8086a14",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 106,
"avg_line_length": 33.82122905027933,
"alnum_prop": 0.6610505450941526,
"repo_name": "ferstar/spark-api-demo-python",
"id": "3ca2b059c9a9c74f2bfc0b2072b4a6343fad1943",
"size": "6119",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apidemo/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "27728"
},
{
"name": "Python",
"bytes": "20222"
}
],
"symlink_target": ""
} |
from page_sets.login_helpers import login_utils
def LoginAccount(action_runner, credential,
                 credentials_path=login_utils.DEFAULT_CREDENTIAL_PATH):
    """Log a user into Dropbox.

    Navigates the tab to Dropbox's login page, fills in the credentials
    stored under |credential| in the |credentials_path| file, clicks the
    "Sign in" button once it becomes enabled, and waits for the resulting
    navigation to complete.

    Args:
        action_runner: Action runner responsible for running actions on the page.
        credential: The credential to retrieve from the credentials file (string).
        credentials_path: The path to credential file (string).

    Raises:
        exceptions.Error: See ExecuteJavaScript()
            for a detailed list of possible exceptions.
    """
    action_runner.Navigate('https://www.dropbox.com/login')

    email, secret = login_utils.GetAccountNameAndPassword(
        credential, credentials_path=credentials_path)
    login_utils.InputWithSelector(
        action_runner, email, 'input[name=login_email]')
    login_utils.InputWithSelector(
        action_runner, secret, 'input[name=login_password]')

    # The "Sign in" button starts out disabled; poll until it exists and
    # is enabled before clicking it.
    submit_selector = '.login-form .login-button'
    action_runner.WaitForJavaScriptCondition('''
      (function() {
        var loginButton = document.querySelector({{ selector }});
        if (!loginButton)
          return false;
        return !loginButton.disabled;
      })();''',
      selector=submit_selector)
    action_runner.ClickElement(selector=submit_selector)
    action_runner.WaitForNavigate()
| {
"content_hash": "7a90f016855be95af394a4f640020964",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 38.775,
"alnum_prop": 0.720180528691167,
"repo_name": "nwjs/chromium.src",
"id": "ff26f6b617b9c42538dd4c8baf79ccb5868eb704",
"size": "1692",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "tools/perf/page_sets/login_helpers/dropbox_login.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from invites.models import Invite
from django.contrib import admin
class InviteAdmin(admin.ModelAdmin):
    """Django admin configuration for the Invite model."""
    # Two-pane selector widget for the invited_users many-to-many field.
    filter_horizontal = ('invited_users',)
    # Columns shown on the changelist page.
    list_display = ('generated_by', 'group', 'key')
    # Sidebar filter by group.
    list_filter = ('group',)
    # Free-text search over generator username, group name, and invite key.
    search_fields = ('generated_by__username', 'group__name', 'key')
# Register the model with its customised admin options.
admin.site.register(Invite, InviteAdmin)
| {
"content_hash": "2f0f35c922b0bfaa8ee4ca8af9352e36",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 28.5,
"alnum_prop": 0.695906432748538,
"repo_name": "znick/anytask",
"id": "1d1afc49b4f94e0a43ba0af74fad58cb47c67abc",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anytask/invites/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89720"
},
{
"name": "Dockerfile",
"bytes": "7709"
},
{
"name": "HTML",
"bytes": "826638"
},
{
"name": "JavaScript",
"bytes": "296467"
},
{
"name": "Less",
"bytes": "7302"
},
{
"name": "Python",
"bytes": "965878"
},
{
"name": "Shell",
"bytes": "30922"
}
],
"symlink_target": ""
} |
from ctypes import c_void_p
from types import NoneType
from django.contrib.gis.gdal.error import GDALException
class GDALBase(object):
    """
    Base object for GDAL objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # The pointer starts out NULL until explicitly assigned.
    _ptr = None

    # The ctypes pointer type accepted by the setter.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Never hand a NULL pointer to a GDAL routine -- that's very bad.
        if not self._ptr:
            raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept a raw integer (wrapped into ptr_type), an existing
        # pointer of the compatible type, or None (NULL).
        if isinstance(ptr, int):
            self._ptr = self.ptr_type(ptr)
            return
        if isinstance(ptr, (self.ptr_type, NoneType)):
            self._ptr = ptr
            return
        raise TypeError('Incompatible pointer type')

    # Pointer access property.
    ptr = property(_get_ptr, _set_ptr)
| {
"content_hash": "e2d762d1561473301c05f6564e3a523c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 95,
"avg_line_length": 33.42857142857143,
"alnum_prop": 0.6102564102564103,
"repo_name": "greggian/TapdIn",
"id": "5086c9b761ba65b6b044cd5f1d02e4118d9cc168",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/gdal/base.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "82525"
},
{
"name": "Python",
"bytes": "3585862"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the shared Tangible template for this large red-fog warren lair."""
	result = Tangible()
	result.template = "object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.iff"
	result.attribute_template_id = -1
	result.stfName("lair_n","warren")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | {
"content_hash": "4ecf858cd7f0fb2138c797b63a0b5c9f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.692063492063492,
"repo_name": "obi-two/Rebelion",
"id": "88026160ff81ab0d77d58fba18f73fa09f515df5",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_red.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
from app import views, models | {
"content_hash": "2ea6a60d11b5a408443d3736c4ffedb5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 19.11111111111111,
"alnum_prop": 0.7616279069767442,
"repo_name": "realdubb/flask-microblog",
"id": "b99bf4e5c27e33abe385069baa3f2f7b846c98ea",
"size": "172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1797"
},
{
"name": "Python",
"bytes": "5238"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EditIdentity(Choreography):
    # Choreography binding for SendGrid's Newsletter API "EditIdentity" endpoint.
    def __init__(self, temboo_session):
        """
        Create a new instance of the EditIdentity Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/SendGrid/NewsletterAPI/Identity/EditIdentity')
    def new_input_set(self):
        # Factory for the input-parameter container used by this Choreo.
        return EditIdentityInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in this Choreo's typed result set.
        return EditIdentityResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Handle for tracking an asynchronous execution of this Choreo.
        return EditIdentityChoreographyExecution(session, exec_id, path)
class EditIdentityInputSet(InputSet):
    """
    Input container for the EditIdentity Choreo: one setter per SendGrid
    parameter, each storing its value under the key the Temboo API expects.
    """
    def set_APIKey(self, value):
        """((required, string) The API Key obtained from SendGrid.)"""
        self._set_input('APIKey', value)
    def set_APIUser(self, value):
        """((required, string) The username registered with SendGrid.)"""
        self._set_input('APIUser', value)
    def set_Address(self, value):
        """((required, string) The new physical address to be used for this Identity.)"""
        self._set_input('Address', value)
    def set_City(self, value):
        """((required, string) The new city for this Identity.)"""
        self._set_input('City', value)
    def set_Country(self, value):
        """((required, string) The new country to be associated with this Identity.)"""
        self._set_input('Country', value)
    def set_Email(self, value):
        """((required, string) An email address to be used for this identity.)"""
        self._set_input('Email', value)
    def set_Identity(self, value):
        """((required, string) The identity that is to be edited.)"""
        self._set_input('Identity', value)
    def set_Name(self, value):
        """((required, string) The new name to be associated with this identity.)"""
        self._set_input('Name', value)
    def set_NewIdentity(self, value):
        """((optional, string) The new name for this identity.)"""
        self._set_input('NewIdentity', value)
    def set_ReplyTo(self, value):
        """((required, string) An email address to be used in the Reply-To field.)"""
        self._set_input('ReplyTo', value)
    def set_ResponseFormat(self, value):
        """((optional, string) The format of the response from SendGrid: specify json or xml. Default is json.)"""
        self._set_input('ResponseFormat', value)
    def set_State(self, value):
        """((required, string) The state to be associated with this Identity.)"""
        self._set_input('State', value)
    def set_Zip(self, value):
        """((required, integer) The new zip code associated with this Identity.)"""
        self._set_input('Zip', value)
class EditIdentityResultSet(ResultSet):
    """
    Typed accessors for the values returned by an EditIdentity Choreo
    execution.
    """
    def getJSONFromString(self, str):
        """Deserialize a JSON string into Python objects."""
        return json.loads(str)
    def get_Response(self):
        """The response from SendGrid, formatted per the ResponseFormat input (json by default)."""
        return self._output.get('Response')
class EditIdentityChoreographyExecution(ChoreographyExecution):
    # Execution handle for asynchronous runs of the EditIdentity Choreo.
    def _make_result_set(self, response, path):
        # Wrap this execution's raw response in the typed result set.
        return EditIdentityResultSet(response, path)
| {
"content_hash": "f1bc44744cc447eb5d69865a1b92b700",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 179,
"avg_line_length": 42.41880341880342,
"alnum_prop": 0.6540398952246625,
"repo_name": "egetzel/wecrow",
"id": "475850371fadb424ddbaa04b140817b5530d3f3b",
"size": "5221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "truehand2014/temboo/Library/SendGrid/NewsletterAPI/Identity/EditIdentity.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11736"
},
{
"name": "Python",
"bytes": "474202"
}
],
"symlink_target": ""
} |
"""
Shelter Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# S3 framework functions
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """
    # Serve CMS-managed content for this module's home page when present;
    # otherwise cms_index falls back to index_alt() below.
    return s3db.cms_index(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
    """
        Module homepage for non-Admin users when no CMS content found
    """
    # Just redirect to the list of Shelters
    # (redirect() raises HTTP, so nothing after it executes)
    redirect(URL(f="shelter"))
# =============================================================================
def shelter_type():
    """
        RESTful CRUD controller
        List / add shelter types (e.g. NGO-operated, Government evacuation center,
        School, Hospital -- see Agasti opt_camp_type.)
    """
    # Delegate entirely to the generic S3 REST controller.
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def shelter_service():
    """
        RESTful CRUD controller
        List / add shelter services (e.g. medical, housing, food, ...)
    """
    # Delegate entirely to the generic S3 REST controller.
    return s3_rest_controller()
# =============================================================================
def shelter():
    """
        RESTful CRUD controller for Shelters.

        Configures the cr_shelter resource (and its presence, staff,
        inventory, assessment and request components) before handing off
        to the generic S3 REST controller.
    """
    table = s3db.cr_shelter
    # Access Presence from Shelters for Check-In/Check-Out
    s3db.add_component("pr_presence",
                       cr_shelter="shelter_id")
    s3db.configure("cr_shelter",
                   # Go to People check-in for this shelter after creation
                   create_next = URL(c="cr", f="shelter",
                                     args=["[id]", "presence"]))
    # Pre-processor
    # NOTE: prep() mutates shared table/field objects per-request; the
    # statement order below is significant.
    def prep(r):
        # Location Filter
        s3db.gis_location_filter(r)
        if r.method == "import":
            table.organisation_id.default = None
        if r.component and r.component.name == "presence":
            # Only show open (not checked-out) presence records
            prtable = db.pr_presence
            r.resource.add_filter(prtable.closed == False)
        if r.interactive:
            if r.id:
                table.obsolete.readable = table.obsolete.writable = True
            if r.component:
                if r.component.name == "inv_item" or \
                   r.component.name == "recv" or \
                   r.component.name == "send":
                    # Filter out items which are already in this inventory
                    s3db.inv_prep(r)
                elif r.component.name == "human_resource":
                    # Filter out people which are already staff for this warehouse
                    s3base.s3_filter_staff(r)
                    # Make it clear that this is for adding new staff, not assigning existing
                    s3.crud_strings.hrm_human_resource.label_create_button = T("Add New Staff Member")
                    # Cascade the organisation_id from the hospital to the staff
                    field = s3db.hrm_human_resource.organisation_id
                    field.default = r.record.organisation_id
                    field.writable = False
                elif r.component.name == "rat":
                    # Hide the Implied fields
                    db.assess_rat.location_id.writable = False
                    db.assess_rat.location_id.default = r.record.location_id
                    db.assess_rat.location_id.comment = ""
                    # Set defaults
                    staff_id = auth.s3_logged_in_human_resource()
                    if staff_id:
                        db.assess_rat.staff_id.default = staff_id.id
                elif r.component.name == "presence":
                    # Check-In/Check-Out UI: labels switch between
                    # "Camp" and "Shelter" wording per deployment setting.
                    field = prtable.shelter_id
                    represent = s3base.S3Represent(lookup="cr_shelter")
                    field.requires = IS_NULL_OR(IS_ONE_OF(db, "cr_shelter.id",
                                                          represent,
                                                          sort=True))
                    field.represent = represent
                    field.ondelete = "RESTRICT"
                    if settings.get_ui_label_camp():
                        HELP = T("The Camp this person is checking into.")
                    else:
                        HELP = T("The Shelter this person is checking into.")
                    ADD_SHELTER = s3.ADD_SHELTER
                    SHELTER_LABEL = s3.SHELTER_LABEL
                    field.comment = S3AddResourceLink(c="cr",
                                                      f="shelter",
                                                      title=ADD_SHELTER,
                                                      tooltip=HELP)
                    field.label = SHELTER_LABEL
                    field.readable = True
                    field.writable = True
                    if settings.get_ui_label_camp():
                        REGISTER_LABEL = T("Register Person into this Camp")
                        EMPTY_LIST = T("No People currently registered in this camp")
                    else:
                        REGISTER_LABEL = T("Register Person into this Shelter")
                        EMPTY_LIST = T("No People currently registered in this shelter")
                    # Make pr_presence.pe_id visible:
                    pe_id = prtable.pe_id
                    pe_id.readable = pe_id.writable = True
                    # Usually, the pe_id field is an invisible foreign key, therefore it
                    # has no default representation/requirements => need to add this here:
                    pe_id.label = T("Person/Group")
                    pe_represent = s3db.pr_PersonEntityRepresent(show_label=True)
                    pe_id.represent = pe_represent
                    pe_id.requires = IS_ONE_OF(db, "pr_pentity.pe_id",
                                               pe_represent,
                                               filterby="instance_type",
                                               orderby="instance_type",
                                               filter_opts=("pr_person",
                                                            "pr_group"))
                    pe_id.widget = S3AutocompleteWidget("pr", "pentity")
                    gtable = s3db.pr_group
                    add_group_label = s3base.S3CRUD.crud_string("pr_group", "label_create_button")
                    pe_id.comment = \
                        DIV(s3db.pr_person_comment(T("Add Person"), REGISTER_LABEL, child="pe_id"),
                            S3AddResourceLink(c="pr",
                                              f="group",
                                              title=add_group_label,
                                              tooltip=T("Create a group entry in the registry."))
                            )
                    # Make Persons a component of Presence to add to list_fields
                    s3db.add_component("pr_person",
                                       pr_presence=Storage(joinby="pe_id", pkey="pe_id"))
                    s3db.configure("pr_presence",
                                   # presence not deletable in this view! (need to register a check-out
                                   # for the same person instead):
                                   deletable=False,
                                   list_fields=["id",
                                                "pe_id",
                                                "datetime",
                                                "presence_condition",
                                                #"proc_desc",
                                                "person.age_group",
                                                ])
                    # Hide the Implied fields
                    lfield = prtable.location_id
                    lfield.writable = False
                    lfield.default = r.record.location_id
                    lfield.comment = ""
                    prtable.proc_desc.readable = prtable.proc_desc.writable = False
                    # Set defaults
                    #prtable.datetime.default = request.utcnow
                    #prtable.observer.default = s3_logged_in_person()
                    # Restrict presence conditions to check-in/check-out only
                    popts = s3.pr_presence_opts
                    pcnds = s3.pr_presence_conditions
                    cr_shelter_presence_opts = {
                        popts.CHECK_IN: pcnds[popts.CHECK_IN],
                        popts.CHECK_OUT: pcnds[popts.CHECK_OUT]
                    }
                    prtable.presence_condition.requires = IS_IN_SET(
                        cr_shelter_presence_opts, zero=None)
                    prtable.presence_condition.default = popts.CHECK_IN
                    # Change the Labels
                    # NOTE(review): "Registations" below is a typo in a runtime
                    # string; left unchanged here (documentation-only pass).
                    s3.crud_strings.pr_presence = Storage(
                        title_create = T("Register Person"),
                        title_display = T("Registration Details"),
                        title_list = T("Registered People"),
                        title_update = T("Edit Registration"),
                        title_search = T("Search Registations"),
                        subtitle_create = REGISTER_LABEL,
                        label_list_button = T("List Registrations"),
                        label_create_button = T("Register Person"),
                        msg_record_created = T("Registration added"),
                        msg_record_modified = T("Registration updated"),
                        msg_record_deleted = T("Registration entry deleted"),
                        msg_list_empty = EMPTY_LIST
                    )
                elif r.component.name == "req":
                    if r.method != "update" and r.method != "read":
                        # Hide fields which don't make sense in a Create form
                        # inc list_create (list_fields over-rides)
                        s3db.req_create_form_mods()
        return True
    s3.prep = prep
    output = s3_rest_controller(rheader = s3db.cr_shelter_rheader,
                                hide_filter = False,
                                )
    return output
# =============================================================================
def incoming():
    """ Incoming Shipments """
    # Delegate to the shared inventory controller helper.
    return inv_incoming()
# -----------------------------------------------------------------------------
def req_match():
    """
    Match Requests: delegate to the REQ module's controller.

    ``s3db`` is a web2py global supplied by the framework's execution
    environment.
    """
    return s3db.req_match()
# END =========================================================================
| {
"content_hash": "685358a0c7e8ae4009279970408befee",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 103,
"avg_line_length": 44.579166666666666,
"alnum_prop": 0.43574165809888776,
"repo_name": "sammyshj/gci",
"id": "365b29d8e5580d37ac38d1e3e28bbcb2f0c9c159",
"size": "10724",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "controllers/cr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1375094"
},
{
"name": "JavaScript",
"bytes": "16625771"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "25684403"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "2003150"
}
],
"symlink_target": ""
} |
"""
Test Cases
Please see README.rst or DOCS.rst or
http://chrisglass.github.com/django_polymorphic/
"""
from __future__ import print_function
import uuid
import re
import django
try:
from unittest import skipIf
except ImportError:
# python<2.7
from django.utils.unittest import skipIf
from django.db.models.query import QuerySet
from django.test import TestCase
from django.db.models import Q, Count
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from polymorphic.models import PolymorphicModel
from polymorphic.manager import PolymorphicManager
from polymorphic.query import PolymorphicQuerySet
from polymorphic.showfields import ShowFieldContent, ShowFieldType, \
ShowFieldTypeAndContent
if django.VERSION < (1, 8):
from polymorphic.tools_for_tests import UUIDField
else:
from django.db.models import UUIDField
# --- Plain (non-polymorphic) multi-table inheritance chain: A <- B <- C ---
class PlainA(models.Model):
    field1 = models.CharField(max_length=10)


class PlainB(PlainA):
    field2 = models.CharField(max_length=10)


class PlainC(PlainB):
    field3 = models.CharField(max_length=10)


# --- Polymorphic inheritance chain used by most tests: A <- B <- C <- D ---
class Model2A(ShowFieldType, PolymorphicModel):
    field1 = models.CharField(max_length=10)


class Model2B(Model2A):
    field2 = models.CharField(max_length=10)


class Model2C(Model2B):
    field3 = models.CharField(max_length=10)


class Model2D(Model2C):
    field4 = models.CharField(max_length=10)
# --- Chain used by the QuerySet.extra() tests ---
class ModelExtraA(ShowFieldTypeAndContent, PolymorphicModel):
    field1 = models.CharField(max_length=10)


class ModelExtraB(ModelExtraA):
    field2 = models.CharField(max_length=10)


class ModelExtraC(ModelExtraB):
    field3 = models.CharField(max_length=10)


class ModelExtraExternal(models.Model):
    # Plain model joined in manually via QuerySet.extra() in the tests.
    topic = models.CharField(max_length=10)


# --- Same shape with the three different ShowField* repr mixins ---
class ModelShow1(ShowFieldType, PolymorphicModel):
    field1 = models.CharField(max_length=10)
    m2m = models.ManyToManyField('self')


class ModelShow2(ShowFieldContent, PolymorphicModel):
    field1 = models.CharField(max_length=10)
    m2m = models.ManyToManyField('self')


class ModelShow3(ShowFieldTypeAndContent, PolymorphicModel):
    field1 = models.CharField(max_length=10)
    m2m = models.ManyToManyField('self')


class ModelShow1_plain(PolymorphicModel):
    # No ShowField* mixin: default Django repr ("pretty printing" off).
    field1 = models.CharField(max_length=10)


class ModelShow2_plain(ModelShow1_plain):
    field2 = models.CharField(max_length=10)
# --- Two siblings under one polymorphic base (queryset-combining tests) ---
class Base(ShowFieldType, PolymorphicModel):
    field_b = models.CharField(max_length=10)


class ModelX(Base):
    field_x = models.CharField(max_length=10)


class ModelY(Base):
    field_y = models.CharField(max_length=10)


class Enhance_Plain(models.Model):
    field_p = models.CharField(max_length=10)


class Enhance_Base(ShowFieldTypeAndContent, PolymorphicModel):
    base_id = models.AutoField(primary_key=True)
    field_b = models.CharField(max_length=10)


class Enhance_Inherit(Enhance_Base, Enhance_Plain):
    # Multiple inheritance: polymorphic base mixed with a plain Django model.
    field_i = models.CharField(max_length=10)


# --- Self-referential FK / M2M relations on a polymorphic base ---
class RelationBase(ShowFieldTypeAndContent, PolymorphicModel):
    field_base = models.CharField(max_length=10)
    fk = models.ForeignKey('self', null=True, related_name='relationbase_set')
    m2m = models.ManyToManyField('self')


class RelationA(RelationBase):
    field_a = models.CharField(max_length=10)


class RelationB(RelationBase):
    field_b = models.CharField(max_length=10)


class RelationBC(RelationB):
    field_c = models.CharField(max_length=10)


class RelatingModel(models.Model):
    many2many = models.ManyToManyField(Model2A)


class One2OneRelatingModel(PolymorphicModel):
    one2one = models.OneToOneField(Model2A)
    field1 = models.CharField(max_length=10)


class One2OneRelatingModelDerived(One2OneRelatingModel):
    field2 = models.CharField(max_length=10)
class MyManagerQuerySet(PolymorphicQuerySet):
    def my_queryset_foo(self):
        # Just a method to prove the existence of the custom queryset:
        return self.all()


class MyManager(PolymorphicManager):
    # Custom polymorphic manager; reverses the sort order on ``field1``.
    queryset_class = MyManagerQuerySet

    def get_queryset(self):
        return super(MyManager, self).get_queryset().order_by('-field1')

    # Django <= 1.5 compatibility
    get_query_set = get_queryset


class ModelWithMyManager(ShowFieldTypeAndContent, Model2A):
    objects = MyManager()
    field4 = models.CharField(max_length=10)


if django.VERSION >= (1, 7):
    # QuerySet.as_manager() only exists on Django 1.7+.
    class ModelWithMyManager2(ShowFieldTypeAndContent, Model2A):
        objects = MyManagerQuerySet.as_manager()
        field4 = models.CharField(max_length=10)
# --- Manager inheritance through the method resolution order ---
class MROBase1(ShowFieldType, PolymorphicModel):
    objects = MyManager()
    # needed as MyManager uses it:
    field1 = models.CharField(max_length=10)


class MROBase2(MROBase1):
    # Django vanilla inheritance does not inherit MyManager as _default_manager
    # here:
    pass


class MROBase3(models.Model):
    objects = PolymorphicManager()


class MRODerived(MROBase2, MROBase3):
    # Which manager wins is decided by the MRO (MyManager from MROBase1).
    pass


class ParentModelWithManager(PolymorphicModel):
    pass


class ChildModelWithManager(PolymorphicModel):
    # Also test whether foreign keys receive the manager:
    fk = models.ForeignKey(
        ParentModelWithManager,
        related_name='childmodel_set'
    )
    objects = MyManager()
# --- Non-polymorphic counterparts, used as a behavioral baseline ---
class PlainMyManagerQuerySet(QuerySet):
    def my_queryset_foo(self):
        # Just a method to prove the existence of the custom queryset:
        return self.all()


class PlainMyManager(models.Manager):
    def my_queryset_foo(self):
        return self.get_queryset().my_queryset_foo()

    def get_queryset(self):
        return PlainMyManagerQuerySet(self.model, using=self._db)

    # Django <= 1.5 compatibility
    get_query_set = get_queryset


class PlainParentModelWithManager(models.Model):
    pass


class PlainChildModelWithManager(models.Model):
    fk = models.ForeignKey(
        PlainParentModelWithManager,
        related_name='childmodel_set'
    )
    objects = PlainMyManager()


class MgrInheritA(models.Model):
    # Multiple plain managers; first one declared becomes _default_manager.
    mgrA = models.Manager()
    mgrA2 = models.Manager()
    field1 = models.CharField(max_length=10)


class MgrInheritB(MgrInheritA):
    mgrB = models.Manager()
    field2 = models.CharField(max_length=10)


class MgrInheritC(ShowFieldTypeAndContent, MgrInheritB):
    pass
# --- Blog models for annotate/aggregate and limit_choices_to tests ---
class BlogBase(ShowFieldTypeAndContent, PolymorphicModel):
    name = models.CharField(max_length=10)


class BlogA(BlogBase):
    info = models.CharField(max_length=10)


class BlogB(BlogBase):
    pass


class BlogEntry(ShowFieldTypeAndContent, PolymorphicModel):
    blog = models.ForeignKey(BlogA)
    text = models.CharField(max_length=10)


class BlogEntry_limit_choices_to(ShowFieldTypeAndContent, PolymorphicModel):
    blog = models.ForeignKey(BlogBase)
    text = models.CharField(max_length=10)


class ModelFieldNameTest(ShowFieldType, PolymorphicModel):
    # Field deliberately named like the model itself (lower-cased).
    modelfieldnametest = models.CharField(max_length=10)


class InitTestModel(ShowFieldType, PolymorphicModel):
    # NOTE: __init__ calls self.x(), which only subclasses define, so this
    # base is only instantiable through a subclass (InitTestModelSubclass).
    bar = models.CharField(max_length=100)

    def __init__(self, *args, **kwargs):
        kwargs['bar'] = self.x()
        super(InitTestModel, self).__init__(*args, **kwargs)


class InitTestModelSubclass(InitTestModel):
    def x(self):
        return 'XYZ'
# models from github issue
class Top(PolymorphicModel):
    name = models.CharField(max_length=50)

    class Meta:
        ordering = ('pk',)


class Middle(Top):
    description = models.TextField()


class Bottom(Middle):
    author = models.CharField(max_length=50)


# --- Non-integer (UUID) primary keys on a polymorphic chain ---
class UUIDProject(ShowFieldTypeAndContent, PolymorphicModel):
    uuid_primary_key = UUIDField(
        primary_key=True,
        default=uuid.uuid1
    )
    topic = models.CharField(max_length=30)


class UUIDArtProject(UUIDProject):
    artist = models.CharField(max_length=30)


class UUIDResearchProject(UUIDProject):
    supervisor = models.CharField(max_length=30)


class UUIDPlainA(models.Model):
    uuid_primary_key = UUIDField(
        primary_key=True,
        default=uuid.uuid1
    )
    field1 = models.CharField(max_length=10)


class UUIDPlainB(UUIDPlainA):
    field2 = models.CharField(max_length=10)


class UUIDPlainC(UUIDPlainB):
    field3 = models.CharField(max_length=10)
# base -> proxy
class ProxyBase(PolymorphicModel):
    some_data = models.CharField(max_length=128)


class ProxyChild(ProxyBase):
    class Meta:
        proxy = True


class NonProxyChild(ProxyBase):
    name = models.CharField(max_length=10)


# base -> proxy -> real models
class ProxiedBase(ShowFieldTypeAndContent, PolymorphicModel):
    name = models.CharField(max_length=10)


class ProxyModelBase(ProxiedBase):
    class Meta:
        proxy = True


class ProxyModelA(ProxyModelBase):
    field1 = models.CharField(max_length=10)


class ProxyModelB(ProxyModelBase):
    field2 = models.CharField(max_length=10)


# test bad field name
# class TestBadFieldModel(ShowFieldType, PolymorphicModel):
#    instance_of = models.CharField(max_length=10)

# validation error: "polymorphic.relatednameclash: Accessor for field
# 'polymorphic_ctype' clashes with related field
# 'ContentType.relatednameclash_set'." (reported by Andrew Ingram)
# fixed with related_name
class RelatedNameClash(ShowFieldType, PolymorphicModel):
    ctype = models.ForeignKey(ContentType, null=True, editable=False)


# class with a parent_link to superclass, and a related_name back to subclass
class TestParentLinkAndRelatedName(ModelShow1_plain):
    superclass = models.OneToOneField(
        ModelShow1_plain,
        parent_link=True,
        related_name='related_name_subclass'
    )


class CustomPkBase(ShowFieldTypeAndContent, PolymorphicModel):
    b = models.CharField(max_length=1)


class CustomPkInherit(CustomPkBase):
    # Child declares its own AutoField pk alongside the inherited parent link.
    custom_id = models.AutoField(primary_key=True)
    i = models.CharField(max_length=1)
class PolymorphicTests(TestCase):
"""
The test suite
"""
def test_annotate_aggregate_order(self):
    """annotate()/aggregate()/order_by() across polymorphic subclasses.

    Expected reprs depend on exact auto-increment id assignment, so the
    creation order below must not change.
    """
    # create a blog of type BlogA
    # create two blog entries in BlogA
    # create some blogs of type BlogB to make the BlogBase table data
    # really polymorphic
    blog = BlogA.objects.create(name='B1', info='i1')
    blog.blogentry_set.create(text='bla')
    BlogEntry.objects.create(blog=blog, text='bla2')
    BlogB.objects.create(name='Bb1')
    BlogB.objects.create(name='Bb2')
    BlogB.objects.create(name='Bb3')
    qs = BlogBase.objects.annotate(entrycount=Count('BlogA___blogentry'))
    self.assertEqual(len(qs), 4)
    for o in qs:
        if o.name == 'B1':
            self.assertEqual(o.entrycount, 2)
        else:
            self.assertEqual(o.entrycount, 0)
    x = BlogBase.objects.aggregate(entrycount=Count('BlogA___blogentry'))
    self.assertEqual(x['entrycount'], 2)
    # create some more blogs for next test
    BlogA.objects.create(name='B2', info='i2')
    BlogA.objects.create(name='B3', info='i3')
    BlogA.objects.create(name='B4', info='i4')
    BlogA.objects.create(name='B5', info='i5')
    # test ordering for field in all entries
    expected = '''
[ <BlogB: id 4, name (CharField) "Bb3">,
  <BlogB: id 3, name (CharField) "Bb2">,
  <BlogB: id 2, name (CharField) "Bb1">,
  <BlogA: id 8, name (CharField) "B5", info (CharField) "i5">,
  <BlogA: id 7, name (CharField) "B4", info (CharField) "i4">,
  <BlogA: id 6, name (CharField) "B3", info (CharField) "i3">,
  <BlogA: id 5, name (CharField) "B2", info (CharField) "i2">,
  <BlogA: id 1, name (CharField) "B1", info (CharField) "i1"> ]'''
    x = '\n' + repr(BlogBase.objects.order_by('-name'))
    self.assertEqual(x, expected)
    # test ordering for field in one subclass only
    # MySQL and SQLite return this order
    expected1 = '''
[ <BlogA: id 8, name (CharField) "B5", info (CharField) "i5">,
  <BlogA: id 7, name (CharField) "B4", info (CharField) "i4">,
  <BlogA: id 6, name (CharField) "B3", info (CharField) "i3">,
  <BlogA: id 5, name (CharField) "B2", info (CharField) "i2">,
  <BlogA: id 1, name (CharField) "B1", info (CharField) "i1">,
  <BlogB: id 2, name (CharField) "Bb1">,
  <BlogB: id 3, name (CharField) "Bb2">,
  <BlogB: id 4, name (CharField) "Bb3"> ]'''
    # PostgreSQL returns this order
    expected2 = '''
[ <BlogB: id 2, name (CharField) "Bb1">,
  <BlogB: id 3, name (CharField) "Bb2">,
  <BlogB: id 4, name (CharField) "Bb3">,
  <BlogA: id 8, name (CharField) "B5", info (CharField) "i5">,
  <BlogA: id 7, name (CharField) "B4", info (CharField) "i4">,
  <BlogA: id 6, name (CharField) "B3", info (CharField) "i3">,
  <BlogA: id 5, name (CharField) "B2", info (CharField) "i2">,
  <BlogA: id 1, name (CharField) "B1", info (CharField) "i1"> ]'''
    x = '\n' + repr(BlogBase.objects.order_by('-BlogA___info'))
    self.assertTrue(x == expected1 or x == expected2)
def test_limit_choices_to(self):
    """
    Smoke test only: ``limit_choices_to`` influences just the Django
    admin, so creating the objects without errors is all that is checked.
    """
    # One BlogA plus one BlogB; both entries point at the BlogB instance.
    BlogA.objects.create(name='aa', info='aa')
    target_blog = BlogB.objects.create(name='bb')
    for _ in range(2):
        BlogEntry_limit_choices_to.objects.create(
            blog=target_blog, text='bla2'
        )
def test_primary_key_custom_field_problem(self):
    """
    object retrieval problem occurring with some custom primary key fields
    (UUIDField as test case)
    """
    UUIDProject.objects.create(topic="John's gathering")
    UUIDArtProject.objects.create(
        topic="Sculpting with Tim", artist="T. Turner"
    )
    UUIDResearchProject.objects.create(
        topic="Swallow Aerodynamics", supervisor="Dr. Winter"
    )
    qs = UUIDProject.objects.all()
    list(qs)
    # Index into the queryset to force per-item retrieval (qs[1] unused).
    a = qs[0]
    qs[1]
    c = qs[2]
    self.assertEqual(len(qs), 3)
    self.assertIsInstance(a.uuid_primary_key, uuid.UUID)
    self.assertIsInstance(a.pk, uuid.UUID)
    # Strip the random UUID values from the repr before comparing.
    res = re.sub(' "(.*?)..", topic', ', topic', repr(qs))
    res_exp = """[ <UUIDProject: uuid_primary_key (UUIDField/pk), topic (CharField) "John's gathering">,
  <UUIDArtProject: uuid_primary_key (UUIDField/pk), topic (CharField) "Sculpting with Tim", artist (CharField) "T. Turner">,
  <UUIDResearchProject: uuid_primary_key (UUIDField/pk), topic (CharField) "Swallow Aerodynamics", supervisor (CharField) "Dr. Winter"> ]"""  # noqa
    self.assertEqual(res, res_exp)
    a = UUIDPlainA.objects.create(field1='A1')
    UUIDPlainB.objects.create(field1='B1', field2='B2')
    c = UUIDPlainC.objects.create(field1='C1', field2='C2', field3='C3')
    qs = UUIDPlainA.objects.all()
    # Test that primary key values are valid UUIDs
    self.assertEqual(uuid.UUID("urn:uuid:%s" % a.pk, version=1), a.pk)
    self.assertEqual(uuid.UUID("urn:uuid:%s" % c.pk, version=1), c.pk)
def create_model2abcd(self):
    """
    Create the chain of objects of Model2,
    this is reused in various tests.

    Ids 1..4 are assigned in creation order; many tests hard-code them.
    """
    Model2A.objects.create(field1='A1')
    Model2B.objects.create(field1='B1', field2='B2')
    Model2C.objects.create(field1='C1', field2='C2', field3='C3')
    Model2D.objects.create(
        field1='D1', field2='D2', field3='D3', field4='D4'
    )


def test_simple_inheritance(self):
    # A base-class queryset must return each row as its real subclass.
    self.create_model2abcd()
    objects = list(Model2A.objects.all())
    self.assertEqual(
        repr(objects[0]),
        '<Model2A: id 1, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>'
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>'  # noqa
    )
    self.assertEqual(
        repr(objects[3]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>'  # noqa
    )


def test_manual_get_real_instance(self):
    # get_real_instance() upcasts a single non-polymorphic result.
    self.create_model2abcd()
    o = Model2A.objects.non_polymorphic().get(field1='C1')
    self.assertEqual(
        repr(o.get_real_instance()),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>'  # noqa
    )


def test_non_polymorphic(self):
    # non_polymorphic() must return plain base-class instances only.
    self.create_model2abcd()
    objects = list(Model2A.objects.all().non_polymorphic())
    self.assertEqual(
        repr(objects[0]), '<Model2A: id 1, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[1]), '<Model2A: id 2, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[2]), '<Model2A: id 3, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[3]), '<Model2A: id 4, field1 (CharField)>'
    )
def test_get_real_instances(self):
    # Upcasting works both from the queryset and from the manager
    # given a manual list of base instances.
    self.create_model2abcd()
    qs = Model2A.objects.all().non_polymorphic()
    # from queryset
    objects = qs.get_real_instances()
    self.assertEqual(
        repr(objects[0]),
        '<Model2A: id 1, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>'
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>'  # noqa
    )
    self.assertEqual(
        repr(objects[3]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>'  # noqa
    )
    # from a manual list
    objects = Model2A.objects.get_real_instances(list(qs))
    self.assertEqual(
        repr(objects[0]),
        '<Model2A: id 1, field1 (CharField)>'
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>'
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>'  # noqa
    )
    self.assertEqual(
        repr(objects[3]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>')  # noqa


def test_translate_polymorphic_q_object(self):
    # translate_polymorphic_Q_object resolves instance_of into plain Q.
    self.create_model2abcd()
    q = Model2A.translate_polymorphic_Q_object(Q(instance_of=Model2C))
    objects = Model2A.objects.filter(q)
    self.assertEqual(
        repr(objects[0]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>'  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>'  # noqa
    )
def test_base_manager(self):
    # _base_manager is polymorphic only at the root of each polymorphic
    # inheritance tree; subclasses fall back to the plain Manager.
    def show_base_manager(model):
        return "{0} {1}".format(
            repr(type(model._base_manager)),
            repr(model._base_manager.model)
        )
    self.assertEqual(
        show_base_manager(PlainA),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainA'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(PlainB),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainB'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(PlainC),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainC'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(Model2A),
        "<class 'polymorphic.manager.PolymorphicManager'> <class 'polymorphic.tests.Model2A'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(Model2B),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.Model2B'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(Model2C),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.Model2C'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(One2OneRelatingModel),
        "<class 'polymorphic.manager.PolymorphicManager'> <class 'polymorphic.tests.One2OneRelatingModel'>"  # noqa
    )
    self.assertEqual(
        show_base_manager(One2OneRelatingModelDerived),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.One2OneRelatingModelDerived'>"  # noqa
    )


def test_instance_default_manager(self):
    # _default_manager, in contrast, stays polymorphic down the chain.
    def show_default_manager(instance):
        return "{0} {1}".format(
            repr(type(instance._default_manager)),
            repr(instance._default_manager.model)
        )
    plain_a = PlainA(field1='C1')
    plain_b = PlainB(field2='C1')
    plain_c = PlainC(field3='C1')
    model_2a = Model2A(field1='C1')
    model_2b = Model2B(field2='C1')
    model_2c = Model2C(field3='C1')
    self.assertEqual(
        show_default_manager(plain_a),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainA'>",  # noqa
    )
    self.assertEqual(
        show_default_manager(plain_b),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainB'>",  # noqa
    )
    self.assertEqual(
        show_default_manager(plain_c),
        "<class 'django.db.models.manager.Manager'> <class 'polymorphic.tests.PlainC'>",  # noqa
    )
    self.assertEqual(
        show_default_manager(model_2a),
        "<class 'polymorphic.manager.PolymorphicManager'> <class 'polymorphic.tests.Model2A'>",  # noqa
    )
    self.assertEqual(
        show_default_manager(model_2b),
        "<class 'polymorphic.manager.PolymorphicManager'> <class 'polymorphic.tests.Model2B'>",  # noqa
    )
    self.assertEqual(
        show_default_manager(model_2c),
        "<class 'polymorphic.manager.PolymorphicManager'> <class 'polymorphic.tests.Model2C'>",  # noqa
    )
def test_foreignkey_field(self):
    # Implicit parent-link accessors (model2b/model2c) between the
    # multi-table inheritance levels resolve polymorphically.
    self.create_model2abcd()
    object2a = Model2A.base_objects.get(field1='C1')
    self.assertEqual(
        repr(object2a.model2b),
        '<Model2B: id 3, field1 (CharField), field2 (CharField)>',
    )
    object2b = Model2B.base_objects.get(field1='C1')
    self.assertEqual(
        repr(object2b.model2c),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )


def test_onetoone_field(self):
    self.create_model2abcd()
    a = Model2A.base_objects.get(field1='C1')
    b = One2OneRelatingModelDerived.objects.create(
        one2one=a,
        field1='f1',
        field2='f2',
    )
    # this result is basically wrong, probably due to Django caching (we
    # used base_objects), but should not be a problem:
    self.assertEqual(
        repr(b.one2one),
        '<Model2A: id 3, field1 (CharField)>',
    )
    c = One2OneRelatingModelDerived.objects.get(field1='f1')
    self.assertEqual(
        repr(c.one2one),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    self.assertEqual(
        repr(a.one2onerelatingmodel),
        '<One2OneRelatingModelDerived: One2OneRelatingModelDerived object>',  # noqa
    )


def test_manytomany_field(self):
    # Exercise each ShowField* repr mixin on self-referential m2m models.
    # Model 1
    o = ModelShow1.objects.create(field1='abc')
    o.m2m.add(o)
    o.save()
    self.assertEqual(
        repr(ModelShow1.objects.all()),
        '[ <ModelShow1: id 1, field1 (CharField), m2m (ManyToManyField)> ]',  # noqa
    )
    # Model 2
    o = ModelShow2.objects.create(field1='abc')
    o.m2m.add(o)
    o.save()
    self.assertEqual(
        repr(ModelShow2.objects.all()),
        '[ <ModelShow2: id 1, field1 "abc", m2m 1> ]',
    )
    # Model 3
    o = ModelShow3.objects.create(field1='abc')
    o.m2m.add(o)
    o.save()
    self.assertEqual(
        repr(ModelShow3.objects.all()),
        '[ <ModelShow3: id 1, field1 (CharField) "abc", m2m (ManyToManyField) 1> ]',  # noqa
    )
    self.assertEqual(
        repr(ModelShow1.objects.all().annotate(Count('m2m'))),
        '[ <ModelShow1: id 1, field1 (CharField), m2m (ManyToManyField) - Ann: m2m__count (int)> ]',  # noqa
    )
    self.assertEqual(
        repr(ModelShow2.objects.all().annotate(Count('m2m'))),
        '[ <ModelShow2: id 1, field1 "abc", m2m 1 - Ann: m2m__count 1> ]',  # noqa
    )
    self.assertEqual(
        repr(ModelShow3.objects.all().annotate(Count('m2m'))),
        '[ <ModelShow3: id 1, field1 (CharField) "abc", m2m (ManyToManyField) 1 - Ann: m2m__count (int) 1> ]',  # noqa
    )
    # no pretty printing
    ModelShow1_plain.objects.create(field1='abc')
    ModelShow2_plain.objects.create(field1='abc', field2='def')
    self.assertEqual(
        repr(ModelShow1_plain.objects.all()),
        '[<ModelShow1_plain: ModelShow1_plain object>, <ModelShow2_plain: ModelShow2_plain object>]',  # noqa
    )
def test_extra_method(self):
    # QuerySet.extra(): where/select/order_by must survive the
    # polymorphic retrieval, including extra-select annotations.
    self.create_model2abcd()
    objects = list(Model2A.objects.extra(where=['id IN (2, 3)']))
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>',
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    objects = Model2A.objects.extra(
        select={"select_test": "field1 = 'A1'"},
        where=["field1 = 'A1' OR field1 = 'B1'"],
        order_by=['-id'],
    )
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField) - Extra: select_test (int)>',  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2A: id 1, field1 (CharField) - Extra: select_test (int)>',
    )
    # Placed after the other tests, only verifying whether there are no
    # more additional objects:
    self.assertEqual(len(objects), 2)
    ModelExtraA.objects.create(field1='A1')
    ModelExtraB.objects.create(field1='B1', field2='B2')
    ModelExtraC.objects.create(field1='C1', field2='C2', field3='C3')
    ModelExtraExternal.objects.create(topic='extra1')
    ModelExtraExternal.objects.create(topic='extra2')
    ModelExtraExternal.objects.create(topic='extra3')
    objects = ModelExtraA.objects.extra(
        tables=["polymorphic_modelextraexternal"],
        select={"topic": "polymorphic_modelextraexternal.topic"},
        where=[
            "polymorphic_modelextraa.id = polymorphic_modelextraexternal.id"  # noqa
        ],
    )
    # The extra column's repr type differs between Python 2 and 3.
    if six.PY3:
        self.assertEqual(
            repr(objects[0]),
            '<ModelExtraA: id 1, field1 (CharField) "A1" - Extra: topic (str) "extra1">'  # noqa
        )
        self.assertEqual(
            repr(objects[1]),
            '<ModelExtraB: id 2, field1 (CharField) "B1", field2 (CharField) "B2" - Extra: topic (str) "extra2">'  # noqa
        )
        self.assertEqual(
            repr(objects[2]),
            '<ModelExtraC: id 3, field1 (CharField) "C1", field2 (CharField) "C2", field3 (CharField) "C3" - Extra: topic (str) "extra3">'  # noqa
        )
    else:
        self.assertEqual(
            repr(objects[0]),
            '<ModelExtraA: id 1, field1 (CharField) "A1" - Extra: topic (unicode) "extra1">'  # noqa
        )
        self.assertEqual(
            repr(objects[1]),
            '<ModelExtraB: id 2, field1 (CharField) "B1", field2 (CharField) "B2" - Extra: topic (unicode) "extra2">'  # noqa
        )
        self.assertEqual(
            repr(objects[2]),
            '<ModelExtraC: id 3, field1 (CharField) "C1", field2 (CharField) "C2", field3 (CharField) "C3" - Extra: topic (unicode) "extra3">'  # noqa
        )
    self.assertEqual(len(objects), 3)
def test_instance_of_filter(self):
    # instance_of is inclusive of subclasses; three equivalent spellings
    # (method, filter kwarg, Q object) must behave identically.
    self.create_model2abcd()
    objects = Model2A.objects.instance_of(Model2B)
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>',
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>',  # noqa
    )
    self.assertEqual(len(objects), 3)
    objects = Model2A.objects.filter(instance_of=Model2B)
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>',
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>',  # noqa
    )
    self.assertEqual(len(objects), 3)
    objects = Model2A.objects.filter(Q(instance_of=Model2B))
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>',
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>',  # noqa
    )
    self.assertEqual(len(objects), 3)
    # not_instance_of excludes the class and all of its subclasses.
    objects = Model2A.objects.not_instance_of(Model2B)
    self.assertEqual(
        repr(objects[0]), '<Model2A: id 1, field1 (CharField)>',
    )
    self.assertEqual(len(objects), 1)


def test_polymorphic___filter(self):
    # Subclass fields are reachable via the ClassName___field syntax.
    self.create_model2abcd()
    objects = Model2A.objects.filter(
        Q(Model2B___field2='B2') | Q(Model2C___field3='C3')
    )
    self.assertEqual(len(objects), 2)
    self.assertEqual(
        repr(objects[0]),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>',
    )
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )


def test_delete(self):
    # Deleting through a base-class reference removes the full object.
    self.create_model2abcd()
    oa = Model2A.objects.get(id=2)
    self.assertEqual(
        repr(oa),
        '<Model2B: id 2, field1 (CharField), field2 (CharField)>')
    self.assertEqual(Model2A.objects.count(), 4)
    oa.delete()
    objects = Model2A.objects.all()
    self.assertEqual(
        repr(objects[0]),
        '<Model2A: id 1, field1 (CharField)>')
    self.assertEqual(
        repr(objects[1]),
        '<Model2C: id 3, field1 (CharField), field2 (CharField), field3 (CharField)>',  # noqa
    )
    self.assertEqual(
        repr(objects[2]),
        '<Model2D: id 4, field1 (CharField), field2 (CharField), field3 (CharField), field4 (CharField)>',  # noqa
    )
    self.assertEqual(len(objects), 3)
def test_combine_querysets(self):
    # ORing two instance_of querysets keeps polymorphic retrieval.
    ModelX.objects.create(field_x='x')
    ModelY.objects.create(field_y='y')
    qs = Base.objects.instance_of(ModelX) | \
        Base.objects.instance_of(ModelY)
    self.assertEqual(
        repr(qs[0]),
        '<ModelX: id 1, field_b (CharField), field_x (CharField)>'
    )
    self.assertEqual(
        repr(qs[1]),
        '<ModelY: id 2, field_b (CharField), field_y (CharField)>'
    )
    self.assertEqual(len(qs), 2)


def test_multiple_inheritance(self):
    # multiple inheritance, subclassing third party models (mix
    # PolymorphicModel with models.Model)
    Enhance_Base.objects.create(field_b='b-base')
    Enhance_Inherit.objects.create(
        field_b='b-inherit',
        field_p='p',
        field_i='i',
    )
    qs = Enhance_Base.objects.all()
    self.assertEqual(len(qs), 2)
    self.assertEqual(
        repr(qs[0]),
        '<Enhance_Base: base_id (AutoField/pk) 1, field_b (CharField) "b-base">',  # noqa
    )
    self.assertEqual(
        repr(qs[1]),
        '<Enhance_Inherit: base_id (AutoField/pk) 2, field_b (CharField) "b-inherit", id 1, field_p (CharField) "p", field_i (CharField) "i">',  # noqa
    )


def test_relation_base(self):
    # ForeignKey, ManyToManyField
    obase = RelationBase.objects.create(field_base='base')
    oa = RelationA.objects.create(field_base='A1', field_a='A2', fk=obase)
    ob = RelationB.objects.create(field_base='B1', field_b='B2', fk=oa)
    RelationBC.objects.create(
        field_base='C1', field_b='C2', field_c='C3', fk=oa,
    )
    oa.m2m.add(oa)
    oa.m2m.add(ob)
    objects = RelationBase.objects.all()
    self.assertEqual(
        repr(objects[0]),
        '<RelationBase: id 1, field_base (CharField) "base", fk (ForeignKey) None, m2m (ManyToManyField) 0>',  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>',  # noqa
    )
    self.assertEqual(
        repr(objects[2]),
        '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>',  # noqa
    )
    self.assertEqual(
        repr(objects[3]),
        '<RelationBC: id 4, field_base (CharField) "C1", fk (ForeignKey) RelationA, field_b (CharField) "C2", field_c (CharField) "C3", m2m (ManyToManyField) 0>',  # noqa
    )
    self.assertEqual(len(objects), 4)
    # Forward FK access from a polymorphically-retrieved instance:
    oa = RelationBase.objects.get(id=2)
    self.assertEqual(
        repr(oa.fk),
        '<RelationBase: id 1, field_base (CharField) "base", fk (ForeignKey) None, m2m (ManyToManyField) 0>',  # noqa
    )
    # Reverse FK set is polymorphic as well:
    objects = oa.relationbase_set.all()
    self.assertEqual(
        repr(objects[0]),
        '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>',  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<RelationBC: id 4, field_base (CharField) "C1", fk (ForeignKey) RelationA, field_b (CharField) "C2", field_c (CharField) "C3", m2m (ManyToManyField) 0>',  # noqa
    )
    self.assertEqual(len(objects), 2)
    ob = RelationBase.objects.get(id=3)
    self.assertEqual(
        repr(ob.fk),
        '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>',  # noqa
    )
    # M2M traversal returns real subclasses too:
    oa = RelationA.objects.get()
    objects = oa.m2m.all()
    self.assertEqual(
        repr(objects[0]),
        '<RelationA: id 2, field_base (CharField) "A1", fk (ForeignKey) RelationBase, field_a (CharField) "A2", m2m (ManyToManyField) 2>',  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<RelationB: id 3, field_base (CharField) "B1", fk (ForeignKey) RelationA, field_b (CharField) "B2", m2m (ManyToManyField) 1>',  # noqa
    )
    self.assertEqual(len(objects), 2)
def test_user_defined_manager(self):
    self.create_model2abcd()
    ModelWithMyManager.objects.create(field1='D1a', field4='D4a')
    ModelWithMyManager.objects.create(field1='D1b', field4='D4b')
    # MyManager should reverse the sorting of field1
    objects = ModelWithMyManager.objects.all()
    self.assertEqual(
        repr(objects[0]),
        '<ModelWithMyManager: id 6, field1 (CharField) "D1b", field4 (CharField) "D4b">',  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<ModelWithMyManager: id 5, field1 (CharField) "D1a", field4 (CharField) "D4a">',  # noqa
    )
    self.assertEqual(len(objects), 2)
    # objects/_default_manager are the custom manager; base_objects stays
    # a plain Django Manager.
    self.assertIs(type(ModelWithMyManager.objects), MyManager)
    self.assertIs(type(ModelWithMyManager._default_manager), MyManager)
    self.assertIs(type(ModelWithMyManager.base_objects), models.Manager)


@skipIf(django.VERSION < (1, 7), "This test needs Django 1.7+")
def test_user_defined_queryset_as_manager(self):
    # QuerySet.as_manager() (Django 1.7+) builds a polymorphic manager
    # from the custom queryset; default ordering applies (no reversal).
    self.create_model2abcd()
    ModelWithMyManager2.objects.create(field1='D1a', field4='D4a')
    ModelWithMyManager2.objects.create(field1='D1b', field4='D4b')
    objects = ModelWithMyManager2.objects.all()
    self.assertEqual(
        repr(objects[0]),
        '<ModelWithMyManager2: id 5, field1 (CharField) "D1a", field4 (CharField) "D4a">'  # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<ModelWithMyManager2: id 6, field1 (CharField) "D1b", field4 (CharField) "D4b">'  # noqa
    )
    self.assertEqual(len(objects), 2)
    self.assertEqual(
        type(ModelWithMyManager2.objects).__name__,
        'PolymorphicManagerFromMyManagerQuerySet'
    )
    self.assertEqual(
        type(ModelWithMyManager2._default_manager).__name__,
        'PolymorphicManagerFromMyManagerQuerySet'
    )
    self.assertIs(
        type(ModelWithMyManager2.base_objects),
        models.Manager
    )
def test_manager_inheritance(self):
    """Managers propagate through multiple inheritance following the MRO."""
    # by choice of MRO, should be MyManager from MROBase1.
    self.assertIs(type(MRODerived.objects), MyManager)
    # check for correct default manager
    self.assertIs(type(MROBase1._default_manager), MyManager)
    # NOTE(review): the original comment claimed vanilla Django does NOT
    # inherit MyManager as _default_manager here, yet the assertion expects
    # MyManager — presumably polymorphic changes this; confirm intent.
    self.assertIs(type(MROBase2._default_manager), MyManager)
def test_queryset_assignment(self):
    """Custom managers/querysets survive inheritance, both plain and polymorphic."""
    # This is just a consistency check for now, testing standard Django
    # behavior:
    parent = PlainParentModelWithManager.objects.create()
    PlainChildModelWithManager.objects.create(fk=parent)
    self.assertIs(
        type(PlainParentModelWithManager._default_manager),
        models.Manager
    )
    self.assertIs(
        type(PlainChildModelWithManager._default_manager),
        PlainMyManager
    )
    self.assertIs(
        type(PlainChildModelWithManager.objects),
        PlainMyManager
    )
    self.assertIs(
        type(PlainChildModelWithManager.objects.all()),
        PlainMyManagerQuerySet
    )
    # A related set is created using the model's _default_manager, so does
    # gain extra methods:
    self.assertIs(
        type(parent.childmodel_set.my_queryset_foo()),
        PlainMyManagerQuerySet
    )
    # For polymorphic models, the same should happen.
    parent = ParentModelWithManager.objects.create()
    ChildModelWithManager.objects.create(fk=parent)
    self.assertIs(
        type(ParentModelWithManager._default_manager),
        PolymorphicManager
    )
    self.assertIs(
        type(ChildModelWithManager._default_manager),
        MyManager
    )
    self.assertIs(
        type(ChildModelWithManager.objects),
        MyManager
    )
    self.assertIs(
        type(ChildModelWithManager.objects.my_queryset_foo()),
        MyManagerQuerySet
    )
    # A related set is created using the model's _default_manager, so does
    # gain extra methods.
    self.assertIs(
        type(parent.childmodel_set.my_queryset_foo()),
        MyManagerQuerySet
    )
def test_proxy_models(self):
    """Proxy children are returned from a polymorphic query in a single DB query."""
    # prepare some data
    for data in ('bleep bloop', 'I am a', 'computer'):
        ProxyChild.objects.create(some_data=data)
    # this caches ContentType queries so they don't interfere with our
    # query counts later
    list(ProxyBase.objects.all())
    # one query per concrete class
    with self.assertNumQueries(1):
        items = list(ProxyBase.objects.all())
    self.assertIsInstance(items[0], ProxyChild)
def test_proxy_get_real_instance_class(self):
    """
    The call to ``get_real_instance()`` also checks whether the returned
    model is of the correct type.
    This unit test guards that this check is working properly. For
    instance, proxy child models need to be handled separately.
    """
    name = "Item1"
    nonproxychild = NonProxyChild.objects.create(name=name)
    # Fetched through the base manager: the real class is the concrete child.
    pb = ProxyBase.objects.get(id=1)
    self.assertEqual(pb.get_real_instance_class(), NonProxyChild)
    self.assertEqual(pb.get_real_instance(), nonproxychild)
    self.assertEqual(pb.name, name)
    # Fetched through the proxy manager: must resolve to the same instance.
    pbm = ProxyChild.objects.get(id=1)
    self.assertEqual(pbm.get_real_instance_class(), NonProxyChild)
    self.assertEqual(pbm.get_real_instance(), nonproxychild)
    self.assertEqual(pbm.name, name)
def test_content_types_for_proxy_models(self):
    """Checks if ContentType is capable of returning proxy models."""
    from django.contrib.contenttypes.models import ContentType
    # for_concrete_model=False requests the proxy's own ContentType entry.
    ct = ContentType.objects.get_for_model(
        ProxyChild, for_concrete_model=False
    )
    self.assertEqual(ProxyChild, ct.model_class())
def test_proxy_model_inheritance(self):
    """
    Polymorphic abilities should also work when the base model is a proxy
    object.
    """
    # The managers should point to the proper objects.
    # otherwise, the whole exercise is pointless.
    self.assertEqual(ProxiedBase.objects.model, ProxiedBase)
    self.assertEqual(ProxyModelBase.objects.model, ProxyModelBase)
    self.assertEqual(ProxyModelA.objects.model, ProxyModelA)
    self.assertEqual(ProxyModelB.objects.model, ProxyModelB)
    # Create objects
    ProxyModelA.objects.create(name="object1")
    ProxyModelB.objects.create(name="object2", field2="bb")
    # Getting single objects
    object1 = ProxyModelBase.objects.get(name='object1')
    object2 = ProxyModelBase.objects.get(name='object2')
    self.assertEqual(
        repr(object1),
        '<ProxyModelA: id 1, name (CharField) "object1", field1 (CharField) "">' # noqa
    )
    self.assertEqual(
        repr(object2),
        '<ProxyModelB: id 2, name (CharField) "object2", field2 (CharField) "bb">' # noqa
    )
    self.assertIsInstance(object1, ProxyModelA)
    self.assertIsInstance(object2, ProxyModelB)
    # Same for lists
    objects = list(ProxyModelBase.objects.all().order_by('name'))
    self.assertEqual(
        repr(objects[0]),
        '<ProxyModelA: id 1, name (CharField) "object1", field1 (CharField) "">' # noqa
    )
    self.assertEqual(
        repr(objects[1]),
        '<ProxyModelB: id 2, name (CharField) "object2", field2 (CharField) "bb">' # noqa
    )
    self.assertIsInstance(objects[0], ProxyModelA)
    self.assertIsInstance(objects[1], ProxyModelB)
def test_custom_pk(self):
    """Polymorphic retrieval works for children declaring a custom primary key."""
    CustomPkBase.objects.create(b='b')
    CustomPkInherit.objects.create(b='b', i='i')
    qs = CustomPkBase.objects.all()
    self.assertEqual(len(qs), 2)
    self.assertEqual(
        repr(qs[0]),
        '<CustomPkBase: id 1, b (CharField) "b">'
    )
    self.assertEqual(
        repr(qs[1]),
        '<CustomPkInherit: id 2, b (CharField) "b", custom_id (AutoField/pk) 1, i (CharField) "i">' # noqa
    )
def test_fix_getattribute(self):
    """Guard against regressions in PolymorphicModel.__getattribute__."""
    # fixed issue in PolymorphicModel.__getattribute__: field name same as
    # model name
    o = ModelFieldNameTest.objects.create(modelfieldnametest='1')
    self.assertEqual(
        repr(o),
        '<ModelFieldNameTest: id 1, modelfieldnametest (CharField)>'
    )
    # if subclass defined __init__ and accessed class members,
    # __getattribute__ had a problem: "...has no attribute
    # 'sub_and_superclass_dict'"
    o = InitTestModelSubclass.objects.create()
    self.assertEqual(o.bar, 'XYZ')
def test_parent_link_and_related_name(self):
    """parent_link/related_name accessors resolve between parent and subclass."""
    t = TestParentLinkAndRelatedName(field1="TestParentLinkAndRelatedName")
    t.save()
    p = ModelShow1_plain.objects.get(field1="TestParentLinkAndRelatedName")
    # check that the polymorphic lookup upcast p to the subclass instance
    self.assertIsInstance(p, TestParentLinkAndRelatedName)
    self.assertEqual(p, t)
    # check that the accessors to parent and subclass work correctly and
    # return the right object:
    p = ModelShow1_plain.objects.non_polymorphic().get(
        field1="TestParentLinkAndRelatedName"
    )
    # p should be Plain1 and t TestParentLinkAndRelatedName, so not equal:
    self.assertNotEqual(p, t)
    self.assertEqual(p, t.superclass)
    self.assertEqual(p.related_name_subclass, t)
    # test that we can delete the object
    t.delete()
class RegressionTests(TestCase):
    """Regression tests collected from the django_polymorphic issue tracker."""

    def test_for_query_result_incomplete_with_inheritance(self):
        """ https://github.com/bconstantin/django_polymorphic/issues/15 """
        # Three levels of inheritance; each level's queryset must contain
        # its own instances plus all derived ones.
        top = Top()
        top.save()
        middle = Middle()
        middle.save()
        bottom = Bottom()
        bottom.save()
        expected_queryset = [top, middle, bottom]
        self.assertQuerysetEqual(
            Top.objects.all(), [repr(r) for r in expected_queryset]
        )
        expected_queryset = [middle, bottom]
        self.assertQuerysetEqual(
            Middle.objects.all(), [repr(r) for r in expected_queryset]
        )
        expected_queryset = [bottom]
        self.assertQuerysetEqual(
            Bottom.objects.all(), [repr(r) for r in expected_queryset]
        )
| {
"content_hash": "6616cb916bb3d989d3e28b633aed1186",
"timestamp": "",
"source": "github",
"line_count": 1373,
"max_line_length": 174,
"avg_line_length": 34.196649672250544,
"alnum_prop": 0.6110921792468904,
"repo_name": "hobarrera/django-polymorphic-ng",
"id": "b3e0c28710acd1cf90c0893e90c524641dcf2661",
"size": "46976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polymorphic/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "679"
},
{
"name": "Python",
"bytes": "170285"
}
],
"symlink_target": ""
} |
"""Support for Ecobee binary sensors."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OCCUPANCY,
BinarySensorDevice,
)
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up ecobee binary (occupancy) sensors.

    One EcobeeBinarySensor is created per remote sensor that reports an
    "occupancy" capability, across every thermostat known to the shared
    ecobee data object stored under hass.data[DOMAIN].
    """
    data = hass.data[DOMAIN]
    # Idiom fix: replaced `dev = list()` plus a triple-nested append loop
    # with a single comprehension; behavior is unchanged.
    entities = [
        EcobeeBinarySensor(data, sensor["name"], index)
        for index in range(len(data.ecobee.thermostats))
        for sensor in data.ecobee.get_remote_sensors(index)
        for item in sensor["capability"]
        if item["type"] == "occupancy"
    ]
    # True requests an immediate state refresh after adding the entities.
    async_add_entities(entities, True)
class EcobeeBinarySensor(BinarySensorDevice):
    """Representation of an Ecobee sensor."""

    def __init__(self, data, sensor_name, sensor_index):
        """Initialize the Ecobee sensor."""
        self.data = data  # shared ecobee data object from hass.data[DOMAIN]
        self._name = sensor_name + " Occupancy"
        self.sensor_name = sensor_name
        self.index = sensor_index  # index of the owning thermostat
        self._state = None  # raw "occupancy" capability value ("true"/"false")

    @property
    def name(self):
        """Return the name of the Ecobee sensor."""
        return self._name.rstrip()

    @property
    def unique_id(self):
        """Return a unique identifier for this sensor."""
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            if sensor["name"] == self.sensor_name:
                # Remote room sensors carry a "code"; the thermostat's own
                # occupancy sensor does not, so fall back to the thermostat
                # identifier plus the sensor id.
                if "code" in sensor:
                    return f"{sensor['code']}-{self.device_class}"
                thermostat = self.data.ecobee.get_thermostat(self.index)
                return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"

    @property
    def device_info(self):
        """Return device information for this sensor."""
        identifier = None
        model = None
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            if sensor["name"] != self.sensor_name:
                continue
            if "code" in sensor:
                identifier = sensor["code"]
                model = "ecobee Room Sensor"
            else:
                thermostat = self.data.ecobee.get_thermostat(self.index)
                identifier = thermostat["identifier"]
                try:
                    model = (
                        f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
                    )
                except KeyError:
                    # Unknown model: log it and leave `model` as None so no
                    # device_info is returned below.
                    _LOGGER.error(
                        "Model number for ecobee thermostat %s not recognized. "
                        "Please visit this link and provide the following information: "
                        "https://github.com/home-assistant/home-assistant/issues/27172 "
                        "Unrecognized model number: %s",
                        thermostat["name"],
                        thermostat["modelNumber"],
                    )
            break
        if identifier is not None and model is not None:
            return {
                "identifiers": {(DOMAIN, identifier)},
                "name": self.sensor_name,
                "manufacturer": MANUFACTURER,
                "model": model,
            }
        return None

    @property
    def is_on(self):
        """Return the status of the sensor."""
        return self._state == "true"

    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        return DEVICE_CLASS_OCCUPANCY

    async def async_update(self):
        """Get the latest state of the sensor."""
        await self.data.update()
        for sensor in self.data.ecobee.get_remote_sensors(self.index):
            if sensor["name"] != self.sensor_name:
                continue
            for item in sensor["capability"]:
                if item["type"] != "occupancy":
                    continue
                self._state = item["value"]
                break
| {
"content_hash": "e8ba8c5c15ac800ae5554fc874d54213",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 88,
"avg_line_length": 36.38532110091743,
"alnum_prop": 0.5479072112960162,
"repo_name": "postlund/home-assistant",
"id": "a4062905eaa174f7a21f2f668d33a2a4ec4d72fd",
"size": "3966",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ecobee/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..")
from client.database.models import *
from client.log import log
from client.database import db_adapter
from client.constants import HTTP_HEADER
from client.functions import safe_get_config, get_now
from flask import request, g
import uuid
from client.md5 import encode
from datetime import timedelta
class UserManager(object):
    """User management: OAuth/password login, API tokens, admin lookups."""

    def __init__(self, db_adapter):
        self.db = db_adapter

    def __generate_api_token(self, admin):
        """Create, persist and return a fresh expiring API token for admin."""
        token_issue_date = get_now()
        token_expire_date = token_issue_date + timedelta(
            minutes=safe_get_config("login/token_expiration_minutes", 1440))
        user_token = UserToken(token=str(uuid.uuid1()),
                               user=admin,
                               expire_date=token_expire_date,
                               issue_date=token_issue_date)
        self.db.add_object(user_token)
        return user_token

    def __validate_token(self, token):
        """Return the token's user if the token exists and is unexpired, else None."""
        t = self.db.find_first_object(UserToken, token=token)
        if t is not None and t.expire_date >= get_now():
            return t.user
        return None

    def __create_or_update_email(self, user, email_info):
        """Insert a UserEmail row, or refresh an existing one for this address."""
        email = email_info['email']
        primary_email = email_info['primary']
        verified = email_info['verified']
        existed = self.db.find_first_object_by(UserEmail, email=email)
        if existed is None:
            user_email = UserEmail(name=user.name,
                                   email=email,
                                   primary_email=primary_email,
                                   verified=verified,
                                   user=user)
            self.db.add_object(user_email)
        else:
            existed.primary_email = primary_email
            existed.verified = verified
            existed.name = user.name
            self.db.commit()

    def __get_existing_user(self, openid, email_list):
        """Look up a user by any of the given emails first, then by openid."""
        # find user by email first in case that email registered in multiple
        # oauth providers
        emails = [e["email"] for e in email_list]
        if emails:
            ues = self.db.find_first_object(UserEmail, UserEmail.email.in_(emails))
            if ues is not None:
                return ues.user
        return self.db.find_first_object_by(User, openid=openid)

    def db_logout(self, admin):
        """Mark the admin offline; return True on success, False on DB error."""
        try:
            self.db.update_object(admin, online=0)
            self.db.commit()
            return True
        except Exception as e:
            log.error(e)
            return False

    def mysql_login(self, user, pwd):
        """Password login. Return {"token", "admin"} or None on bad credentials."""
        enc_pwd = encode(pwd)
        admin = self.db.find_first_object_by(User, name=user, password=enc_pwd)
        if admin is None:
            log.warn("invalid user/pwd login: user=%s, encoded pwd=%s" % (user, enc_pwd))
            return None
        token = self.__generate_api_token(admin)
        return {
            "token": token,
            "admin": admin
        }

    def oauth_db_login(self, openid, **kwargs):
        """OAuth login: create or update the user record, then issue an API token."""
        # update db
        email_list = kwargs['email_list']
        admin = self.__get_existing_user(openid, email_list)
        if admin is not None:
            self.db.update_object(admin,
                                  provider=kwargs["provider"],
                                  name=kwargs["name"],
                                  nickname=kwargs["nickname"],
                                  access_token=kwargs["access_token"],
                                  avatar_url=kwargs["avatar_url"],
                                  last_login_time=get_now(),
                                  online=1)
        else:
            admin = User(openid=openid,
                        name=kwargs["name"],
                        provider=kwargs["provider"],
                        nickname=kwargs["nickname"],
                        access_token=kwargs["access_token"],
                        avatar_url=kwargs["avatar_url"],
                        online=1)
            self.db.add_object(admin)
        # Bug fix: both branches previously used map() purely for its side
        # effect. map() is lazy on Python 3, so the emails were never
        # created/updated there. An explicit loop works on both versions.
        for email_info in email_list:
            self.__create_or_update_email(admin, email_info)
        # generate API token
        token = self.__generate_api_token(admin)
        return {
            "token": token,
            "admin": admin
        }

    def validate_request(self):
        """Validate the request's token header; on success attach the admin to flask.g."""
        if HTTP_HEADER.TOKEN not in request.headers:
            return False
        admin = self.__validate_token(request.headers[HTTP_HEADER.TOKEN])
        if admin is None:
            return False
        g.admin = admin
        return True

    def get_user_by_id(self, id):
        """Return the User with the given primary key, or None."""
        return self.db.find_first_object_by(User, id=id)

    def get_admin_info(self, admin):
        """Return a JSON-serializable summary of the admin user."""
        return {
            "id": admin.id,
            "name": admin.name,
            "nickname": admin.nickname,
            "emails": [e.dic() for e in admin.emails.all()],
            "avatar_url": admin.avatar_url,
            "online": admin.online,
            "create_time": str(admin.create_time),
            "last_login_time": str(admin.last_login_time)
        }

    def get_hackid_from_adminid(self, admin_id):
        """Return the distinct hackathon ids administered by admin_id."""
        admin_user_hackathon_rels = self.db.find_all_objects_by(AdminHackathonRel, user_id=admin_id)
        if not admin_user_hackathon_rels:
            return []
        # get hackathon ids from the AdminHackathonRel rows, deduplicated
        hackathon_ids = [rel.hackathon_id for rel in admin_user_hackathon_rels]
        return list(set(hackathon_ids))

    def is_super(self, admin_id):
        """True if admin_id administers the sentinel hackathon id -1 (super admin)."""
        return -1 in self.get_hackid_from_adminid(admin_id)
# Module-level singleton used by the rest of the client package.
user_manager = UserManager(db_adapter)


def is_super(admin):
    """Free-function adapter so User instances can ask 'am I a super admin?'."""
    return user_manager.is_super(admin.id)

# Monkey-patch the model: gives every User instance an .is_super() method.
User.is_super = is_super
| {
"content_hash": "88c0157829f8e0e4601cd17f730b43da",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 100,
"avg_line_length": 34.712574850299404,
"alnum_prop": 0.5451095394169398,
"repo_name": "Fendoe/open-hackathon-o",
"id": "0d9f8341d34a8af2dd8a415094157a92f49bd61d",
"size": "7165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open-hackathon-client/src/client/user/user_mgr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109082"
},
{
"name": "HTML",
"bytes": "426116"
},
{
"name": "Java",
"bytes": "12108"
},
{
"name": "JavaScript",
"bytes": "414512"
},
{
"name": "Python",
"bytes": "2270532"
},
{
"name": "Ruby",
"bytes": "1518308"
},
{
"name": "Shell",
"bytes": "18652"
}
],
"symlink_target": ""
} |
"""
https://developer.openstack.org/api-ref/identity/v3-ext/#os-ep-filter-api
"""
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class EndPointsFilterClient(rest_client.RestClient):
    """Client for the Keystone v3 OS-EP-FILTER extension API.

    Manages associations between projects and service endpoints.
    """
    api_version = "v3"
    ep_filter = "OS-EP-FILTER"

    def _association_url(self, project_id, endpoint_id):
        # Path addressing one project<->endpoint association.
        return '{0}/projects/{1}/endpoints/{2}'.format(
            self.ep_filter, project_id, endpoint_id)

    def list_projects_for_endpoint(self, endpoint_id):
        """List all projects that are associated with the endpoint."""
        url = '{0}/endpoints/{1}/projects'.format(self.ep_filter, endpoint_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def add_endpoint_to_project(self, project_id, endpoint_id):
        """Create an association between a project and an endpoint."""
        resp, body = self.put(
            self._association_url(project_id, endpoint_id), None)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp, body)

    def check_endpoint_in_project(self, project_id, endpoint_id):
        """Verify (via HEAD) that the project/endpoint association exists."""
        resp, body = self.head(
            self._association_url(project_id, endpoint_id), None)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp, body)

    def list_endpoints_in_project(self, project_id):
        """List the endpoints associated with a project."""
        url = '{0}/projects/{1}/endpoints'.format(self.ep_filter, project_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, json.loads(body))

    def delete_endpoint_from_project(self, project_id, endpoint_id):
        """Remove the association between a project and an endpoint."""
        resp, body = self.delete(
            self._association_url(project_id, endpoint_id))
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp, body)
| {
"content_hash": "e8d3b9425d800508415a685a24d284aa",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 73,
"avg_line_length": 40.075471698113205,
"alnum_prop": 0.6271186440677966,
"repo_name": "Juniper/tempest",
"id": "a8cd7222e36b2bb3847915ca9389a0853aed01d5",
"size": "2750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/lib/services/identity/v3/endpoint_filter_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
} |
"""Tests for ncolony.beatcheck"""
import functools
import os
import shutil
import time
import unittest
from twisted.python import filepath, usage
from twisted.application import internet as tainternet
from ncolony import beatcheck, ctllib
from ncolony.client.tests import test_heart
from ncolony.tests import helper
class TestBeatChecker(unittest.TestCase):
    """Test the beat checker.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual`` throughout; behavior is unchanged.
    """

    def setUp(self):
        """Create fresh config/messages/status directories, removed on cleanup."""
        self.path = os.path.abspath('dummy-config')
        self.messages = os.path.abspath('dummy-messages')
        self.status = os.path.abspath('dummy-status')
        paths = (self.path, self.status, self.messages)

        def _cleanup():
            for path in paths:
                if os.path.exists(path):
                    shutil.rmtree(path)
        _cleanup()
        self.addCleanup(_cleanup)
        for path in paths:
            os.makedirs(path)
        self.filepath = filepath.FilePath(self.path)
        # checker(start, now) -> list of overdue process names
        self.checker = functools.partial(beatcheck.check, self.filepath)

    def test_empty_dir(self):
        """Test checking an empty config directory"""
        self.assertFalse(self.checker(0, 0))

    def test_no_heart(self):
        """Test checking a config directory with one file that does not beat"""
        check = {}
        jsonCheck = helper.dumps2utf8(check)
        fooFile = self.filepath.child('foo')
        fooFile.setContent(jsonCheck)
        mtime = fooFile.getModificationTime()
        self.assertFalse(self.checker(mtime, mtime))

    def test_one_check(self):
        """Test checking a config directory with one file"""
        status = os.path.join(self.status, 'foo')
        check = {'ncolony.beatcheck':
                 {'period': 10, 'grace': 1, 'status': status}}
        jsonCheck = helper.dumps2utf8(check)
        fooFile = self.filepath.child('foo')
        fooFile.setContent(jsonCheck)
        mtime = fooFile.getModificationTime()
        self.assertFalse(self.checker(mtime, mtime))
        self.assertFalse(self.checker(mtime, mtime+9))
        # No status file past period+grace: 'foo' is overdue.
        self.assertEqual(self.checker(mtime, mtime+20), ['foo'])
        statusFile = filepath.FilePath(status)
        statusFile.setContent(b"111")
        newMTime = statusFile.getModificationTime()
        newMTime += 100
        # Back...to the future
        statusFile.changed()
        os.utime(status, (newMTime, newMTime))
        self.assertFalse(self.checker(mtime, newMTime))
        self.assertFalse(self.checker(mtime, newMTime+9))
        self.assertEqual(self.checker(mtime, newMTime+11), ['foo'])

    def test_one_default_check(self):
        """Test checking a config directory with one file"""
        status = os.path.join(self.status, 'foo')
        check = {'ncolony.beatcheck':
                 {'period': 10, 'grace': 1, 'status': self.status}}
        jsonCheck = helper.dumps2utf8(check)
        fooFile = self.filepath.child('foo')
        fooFile.setContent(jsonCheck)
        mtime = fooFile.getModificationTime()
        # NOTE(review): FilePath's second positional argument is not a child
        # segment; presumably FilePath(status) was intended — confirm.
        statusFile = filepath.FilePath(status, 'foo')
        statusFile.setContent(b"111")
        newMTime = statusFile.getModificationTime()
        newMTime += 100
        # Back...to the future
        statusFile.changed()
        os.utime(status, (newMTime, newMTime))
        self.assertFalse(self.checker(mtime, newMTime))

    def test_grace(self):
        """Test checking that grace period is respected"""
        status = os.path.join(self.status, 'foo')
        check = {'ncolony.beatcheck':
                 {'period': 10, 'grace': 3, 'status': status}}
        jsonCheck = helper.dumps2utf8(check)
        fooFile = self.filepath.child('foo')
        fooFile.setContent(jsonCheck)
        mtime = fooFile.getModificationTime()
        self.assertFalse(self.checker(mtime, mtime))
        # grace=3 periods of 10s: overdue only after 30 seconds.
        self.assertFalse(self.checker(mtime, mtime+29))
        self.assertEqual(self.checker(mtime, mtime+31), ['foo'])

    def test_epoch(self):
        """Test that start time is being respected"""
        status = os.path.join(self.status, 'foo')
        check = {'ncolony.beatcheck':
                 {'period': 10, 'grace': 1, 'status': status}}
        jsonCheck = helper.dumps2utf8(check)
        fooFile = self.filepath.child('foo')
        fooFile.setContent(jsonCheck)
        mtime = fooFile.getModificationTime()
        self.assertFalse(self.checker(mtime+100, mtime+100))
        self.assertFalse(self.checker(mtime+100, mtime+101))
        self.assertEqual(self.checker(mtime+100, mtime+111), ['foo'])

    def test_two_gone(self):
        """Test two configuration files with no status"""
        mtime = 0
        for fname in ['foo', 'bar']:
            status = os.path.join(self.status, fname)
            check = {'ncolony.beatcheck':
                     {'period': 10, 'grace': 1, 'status': status}}
            jsonCheck = helper.dumps2utf8(check)
            fileObj = self.filepath.child(fname)
            fileObj.setContent(jsonCheck)
            mtime = max([mtime, fileObj.getModificationTime()])
        self.assertFalse(self.checker(mtime, mtime))
        self.assertEqual(set(self.checker(mtime, mtime+11)),
                         set(['foo', 'bar']))

    def test_two_old(self):
        """Test two configuration files with old status"""
        mtime = 0
        for fname in ['foo', 'bar']:
            status = os.path.join(self.status, fname)
            check = {'ncolony.beatcheck':
                     {'period': 10, 'grace': 1, 'status': status}}
            jsonCheck = helper.dumps2utf8(check)
            fileObj = self.filepath.child(fname)
            fileObj.setContent(jsonCheck)
            statusFile = filepath.FilePath(status)
            statusFile.setContent(b"111")
            newMTime = statusFile.getModificationTime()
            mtime = max([mtime, newMTime, fileObj.getModificationTime()])
        self.assertFalse(self.checker(mtime, mtime))
        self.assertEqual(set(self.checker(mtime, mtime+11)),
                         set(['foo', 'bar']))

    def test_run(self):
        """Test the runner"""
        _checker_args = []
        _restarter_args = []

        def _checker(arg):
            _checker_args.append(arg)
            return ['foo', 'bar']

        def _timer():
            return 'baz'

        def _restarter(thing):
            _restarter_args.append(thing)
        beatcheck.run(_restarter, _checker, _timer)
        # run() must feed the timer's value to the checker and restart
        # everything the checker returned.
        self.assertEqual(_checker_args, ['baz'])
        self.assertEqual(_restarter_args, ['foo', 'bar'])

    def test_make_service(self):
        """Test makeService"""
        opt = dict(config='config',
                   messages='messages',
                   freq=5)
        before = time.time()
        masterService = beatcheck.makeService(opt)
        service = masterService.getServiceNamed("beatcheck")
        after = time.time()
        self.assertIsInstance(service, tainternet.TimerService)
        self.assertEqual(service.step, 5)
        callableThing, args, kwargs = service.call
        self.assertIs(callableThing, beatcheck.run)
        self.assertFalse(kwargs)
        restarter, checker, timer = args
        self.assertIs(timer, time.time)
        self.assertIs(restarter.func, ctllib.restart)
        self.assertFalse(restarter.keywords)
        places, = restarter.args
        self.assertEqual(places,
                         ctllib.Places(config='config', messages='messages'))
        self.assertIs(checker.func, beatcheck.check)
        self.assertFalse(checker.keywords)
        path, start = checker.args
        self.assertEqual(path.basename(), 'config')
        # The check epoch must have been captured during makeService().
        self.assertLessEqual(before, start)
        self.assertLessEqual(start, after)

    def test_make_service_with_health(self):
        """Test beatcheck with heart beater"""
        testWrappedHeart(self, beatcheck.makeService)
def testWrappedHeart(utest, serviceMaker):
    """Service has a child heart beater"""
    # Shared helper (not a TestCase method): asserts that the service built
    # by serviceMaker carries a properly configured 'heart' child service.
    opt = dict(config='config',
               messages='messages',
               freq=5)
    test_heart.replaceEnvironment(utest)
    masterService = serviceMaker(opt)
    service = masterService.getServiceNamed('heart')
    test_heart.checkHeartService(utest, service)
class TestOptions(unittest.TestCase):
    """Tests for the beatcheck command-line option parser."""

    def setUp(self):
        """Build a fresh Options object and a minimal valid command line."""
        self.opt = beatcheck.Options()
        self.basic = '--message message-dir --config config-dir'.split()

    def test_commandLine_required_messages(self):
        """Omitting --message must raise a UsageError."""
        with self.assertRaises(usage.UsageError):
            self.opt.parseOptions(['--config', 'c'])

    def test_commandLine_required_config(self):
        """Omitting --config must raise a UsageError."""
        with self.assertRaises(usage.UsageError):
            self.opt.parseOptions(['--message', 'm'])

    def test_basic(self):
        """A minimal command line parses and the frequency defaults to 10."""
        self.opt.parseOptions(self.basic)
        for key, expected in (('messages', 'message-dir'),
                              ('config', 'config-dir'),
                              ('freq', 10)):
            self.assertEqual(self.opt[key], expected)

    def test_freq(self):
        """An explicit --freq value overrides the default and is an int."""
        self.opt.parseOptions(self.basic + ['--freq', '13'])
        self.assertEqual(self.opt['freq'], 13)
| {
"content_hash": "fae6532ed898bc1d5a5185e1f22af513",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 38.11522633744856,
"alnum_prop": 0.6102353703303822,
"repo_name": "moshez/ncolony",
"id": "e0bf6d099d00a7dee216c693beb8175aeabe4996",
"size": "9318",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ncolony/tests/test_beatcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108990"
}
],
"symlink_target": ""
} |
# NOTE(review): the helper module is named "email", which shadows the
# standard-library email package — consider renaming it.
import email

# Exercise list generation from a known input file.
email.setFileRead("testFile.txt")
email.setFileWrite("testOut.txt")
email.generateEmailList()

# Exercise removal of a person from the output file.
email.setFileWrite("testRemove.txt")
email.removePerson("test")

# Populate a file, then verify clearOutFile() empties it.
email.setFileWrite("clearTest.txt")
with open("clearTest.txt", 'w') as f:
    for i in range(100):
        f.write(str(i) + "\n")
email.clearOutFile()

# Bug fix: the names were written without newlines, so the file held one
# concatenated line and sortFile() had nothing to sort. Write one name
# per line instead (same names, same order).
email.setFileWrite("sortTest.txt")
names = ["Zaul", "Xaul", "Yul", "Maul", "Naul",
         "Eaul", "Daul", "Caul", "Baul", "Aaul"]
with open("sortTest.txt", 'w') as f:
    for name in names:
        f.write(name + "\n")
email.sortFile("sortTest.txt")

email.usage()
# Parenthesized form works on both Python 2 and 3.
print("All Tests pass")
| {
"content_hash": "e309b32cd3195f52d358ad6b2c331727",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 36,
"avg_line_length": 17.054054054054053,
"alnum_prop": 0.6893819334389857,
"repo_name": "nextBillyonair/cult",
"id": "b2cdf24c6c282c276c98db04e6860b34093a29f4",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Testing/testEmail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6050"
},
{
"name": "HTML",
"bytes": "248704"
},
{
"name": "Python",
"bytes": "102969"
}
],
"symlink_target": ""
} |
# Odoo module manifest for auth_signup_confirmation.
{
    "name": "Email confirmation on sign up",
    "summary": """New user is able to login only after confirming his/her email""",
    # Bug fix: the key was misspelled "vesion", so Odoo could not read the
    # module version from this manifest.
    "version": "13.0.1.0.1",
    "author": "IT-Projects LLC",
    "website": "https://it-projects.info",
    "license": "Other OSI approved licence",  # MIT
    "price": 40.00,
    "currency": "EUR",
    "depends": ["auth_signup"],
    "data": ["data/config.xml", "views/thankyou.xml", "data/email.xml"],
    "installable": False,
    "post_init_hook": "init_auth",
}
| {
"content_hash": "e70338feead6333c2f85daeb8eca6f13",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.5903614457831325,
"repo_name": "it-projects-llc/misc-addons",
"id": "a953e330a30dec8623aa29c6b0fd0d02fa8fc4ca",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/13.0",
"path": "auth_signup_confirmation/__manifest__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14551"
},
{
"name": "HTML",
"bytes": "130934"
},
{
"name": "JavaScript",
"bytes": "407608"
},
{
"name": "Python",
"bytes": "414883"
}
],
"symlink_target": ""
} |
from os import environ
from six import string_types
from six.moves import urllib_parse
from .exceptions import InvalidResourcePath
GOOEE_API_URL = environ.get('GOOEE_API_URL', 'https://dev-api.gooee.io/')
GOOEE_API_PATH = urllib_parse.urlparse(GOOEE_API_URL).path
def format_path(path, api_base_url=GOOEE_API_URL):
    """Resolve *path* against the API base URL.

    A string starting with "/" is treated as an API-relative shortcut and
    joined onto *api_base_url*; any other string is returned untouched.
    A non-string *path* raises InvalidResourcePath.
    """
    if not isinstance(path, string_types):
        raise InvalidResourcePath(
            'The path argument must be a string that begins with "/"')
    if not path.startswith('/'):
        return path
    # "/..." is the HTTP shortcut form, relative to the API root.
    return urllib_parse.urljoin(api_base_url, path.lstrip('/'))
| {
"content_hash": "39c85d5b64fe70cf448147daf09ac852",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.7129186602870813,
"repo_name": "GooeeIOT/gooee-python-sdk",
"id": "6b2d6dd77c55f5f7db403c9e3b0f931730a52f39",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gooee/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1772"
},
{
"name": "Python",
"bytes": "14919"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import events.models
class Migration(migrations.Migration):
    # Adds RecurringRule.duration_internal and relaxes the user-facing
    # ``duration`` field into free-form text.

    dependencies = [
        ('events', '0003_auto_20150416_1853'),
    ]

    operations = [
        migrations.AddField(
            model_name='recurringrule',
            name='duration_internal',
            # Callable default: each row gets a freshly computed duration.
            field=models.DurationField(default=events.models.duration_default),
        ),
        migrations.AlterField(
            model_name='recurringrule',
            name='duration',
            field=models.CharField(default='15 min', max_length=50),
        ),
    ]
| {
"content_hash": "c5c5e54f8494598856b928811f3f1bf1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 25.958333333333332,
"alnum_prop": 0.608346709470305,
"repo_name": "Mariatta/pythondotorg",
"id": "384bd9e4e85a853073d74e248b3af31bb3456395",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/migrations/0004_auto_20170814_0519.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "193350"
},
{
"name": "HTML",
"bytes": "232812"
},
{
"name": "JavaScript",
"bytes": "18735"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Python",
"bytes": "500650"
},
{
"name": "Ruby",
"bytes": "1895"
}
],
"symlink_target": ""
} |
import imp
import pkg_resources
import sys
class MockProvider(pkg_resources.NullProvider):
    """pkg_resources provider serving resources from an in-memory dict.

    The wrapped module must expose a ``_resources`` mapping of
    resource name -> file-like object.
    """

    def __init__(self, module):
        pkg_resources.NullProvider.__init__(self, module)
        self.module = module  # kept for direct resource lookups below

    def _has(self, path):
        # A resource "exists" iff it is a key of the module's dict.
        return path in self.module._resources

    def _isdir(self, path):
        # The in-memory resource namespace is flat: nothing is a directory.
        return False

    def get_resource_stream(self, manager, resource_name):
        return self.module._resources[resource_name]

    def _get(self, path):
        # NullProvider expects raw content, so read the stream out.
        return self.module._resources[path].read()
class MockLoader(object):
    """Minimal stand-in for a PEP 302 loader; its mere presence on
    ``egg.__loader__`` lets pkg_resources pick a provider for the egg."""
    pass
def create_egg(name, resources):
    """
    Creates a mock egg with a list of resources.

    name: The name of the module.
    resources: A dictionary of resources (name -> file-like object).

    The module is registered in ``sys.modules`` and also returned so
    callers can use it directly.
    """
    # Local import: ``imp`` is deprecated since Python 3.4 and removed in
    # 3.12; ``types.ModuleType`` is the supported equivalent and exists on
    # Python 2 as well.
    import types

    egg = types.ModuleType(name)
    egg.__loader__ = MockLoader()
    egg._resources = resources
    sys.modules[name] = egg
    return egg
| {
"content_hash": "848f6c2ebc5afb53581069cebaf1eb17",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 58,
"avg_line_length": 22.736842105263158,
"alnum_prop": 0.6493055555555556,
"repo_name": "abbas123456/django-right-to-left",
"id": "9ee7910fe4f7241fda3852e33c993560b0821819",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9707"
}
],
"symlink_target": ""
} |
import json
from datetime import datetime, timedelta
import logging
import sys
import StringIO
import caldav
import vobject
import pytz
# Default config file name; overridable from the command line.
CONFIG_FILE = "conf.json"
# Crude CLI handling: ``-c`` switches to an alternative config file.
# NOTE(review): the filename is read from sys.argv[2], i.e. this assumes
# ``-c`` is the first option on the command line — confirm intended usage.
if '-c' in sys.argv:
    CONFIG_FILE = sys.argv[2]
# Fallback values used whenever the config file omits a section or key.
CONFIG_DEFAULTS = {
    "filter": {
        "calendars": [],
        "interval": [-7, 365]
    },
    "display": {
        "timezone": "UTC",
        "event_format": "{start:%d %b %Y}\t{summary}"
    },
}
# Padding applied around the requested interval when querying the server,
# so events shifted across day boundaries by timezone math are not missed.
JET_LAG = timedelta(days=2)
_logger = logging.getLogger(__name__)
def main():
    """Entry point: read the config, then either list the available
    calendars (``-ls``) or print the upcoming events.  Any failure is
    shown to the user while the traceback goes to the error log."""
    setup_logging()
    try:
        conf = read_config(CONFIG_FILE)
        if '-ls' in sys.argv:
            list_calendars(conf)
            return
        display_events(download_upcoming_events(conf), conf)
    except Exception as err:
        uprint(err)
        _logger.exception('traceback has been silenced:')
def setup_logging():
    """Route log output (tracebacks silenced from the console) to a file."""
    logging.basicConfig(filename="errors.log")
def list_calendars(conf):
    """Print every calendar available on the configured CalDAV server,
    or a placeholder line when the server has none."""
    calendars = connect(conf['url'])
    if not calendars:
        uprint('No calendars')
        return
    for calendar in calendars:
        uprint(calendar)
def read_config(conf_filename):
    """Read the JSON config file and return a validated config dict.

    Raises IOError when the file cannot be read and ValueError when it
    contains invalid JSON.  Validation errors raised by
    ``validate_config`` propagate unchanged.
    """
    try:
        with open(conf_filename) as fd:
            conf = json.load(fd)
    except IOError as err:
        raise IOError('Config file reading error: {}'.format(err))
    except ValueError as err:
        raise ValueError('Config json parsing error: {}'.format(err))
    # Bug fix: validation used to run inside the try block above, so any
    # ValueError raised by validate_config (e.g. 'No url is set') was
    # re-reported as a bogus "Config json parsing error".
    return validate_config(conf)
def validate_config(conf):
    """Validate the raw config dict and fill gaps with CONFIG_DEFAULTS.

    Returns a normalized dict with keys 'url', 'filter' (calendars,
    interval) and 'display' (timezone object, event_format).
    Raises ValueError for a missing url, a malformed interval, or an
    unknown timezone name.
    """
    if 'url' not in conf:
        raise ValueError('No url is set')
    # Renamed from ``filter`` to avoid shadowing the builtin.
    filter_section = conf.get('filter', CONFIG_DEFAULTS['filter'])
    calendars_to_display = filter_section.get('calendars',
                               CONFIG_DEFAULTS['filter']['calendars'])
    if calendars_to_display is not None:
        calendars_to_display = [unicode(c) for c in calendars_to_display]
    interval_rel = filter_section.get('interval',
                                      CONFIG_DEFAULTS['filter']['interval'])
    try:
        interval_rel[0]+1, interval_rel[1]+1  # make sure they're ints
    except IndexError:
        raise ValueError('Wrong time interval in config file')
    except TypeError:
        raise ValueError('Strange time interval values in config file')
    sec_display = conf.get('display', CONFIG_DEFAULTS['display'])
    ev_format = sec_display.get('event_format',
                                CONFIG_DEFAULTS['display']['event_format'])
    tz_name = sec_display.get('timezone',
                              CONFIG_DEFAULTS['display']['timezone'])
    try:
        timezone = pytz.timezone(tz_name)
    # Bug fix: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; pytz raises UnknownTimeZoneError (an Exception).
    except Exception:
        raise ValueError('Unknown timezone in conf file: "{}"'.format(tz_name))
    return {
        'url': conf['url'],
        'filter': {
            'calendars': calendars_to_display,
            'interval': interval_rel,
        },
        'display': {
            'timezone': timezone,
            'event_format': ev_format,
        }
    }
def download_upcoming_events(conf):
    """Fetch events from the selected calendars, parse them, sort them by
    start time and drop those that fall outside the configured interval."""
    calendars = connect(conf['url'])
    wanted = conf['filter']['calendars']
    interval = get_absolute_interval(conf)
    # Query with JET_LAG padding so events shifted by timezone conversion
    # are still returned by the server; they are filtered precisely below.
    padded = (min(interval) - JET_LAG, max(interval) + JET_LAG)
    raw_events = []
    for calendar in calendars:
        if not is_in_display_list(calendar, wanted):
            continue
        raw_events.extend(calendar.date_search(padded[0], padded[1]))
    parsed = [parse_event(event, conf['display']['timezone'])
              for event in raw_events]
    parsed.sort(key=lambda event: event['start'])
    return filter_out_jet_lagged_events(parsed, interval)
def connect(url):
    """Return the list of calendars for the principal behind ``url``.
    Translates authorization and network failures into IOError."""
    try:
        client = caldav.DAVClient(url)
        return client.principal().calendars()
    except caldav.lib.error.AuthorizationError:
        raise IOError('Access denied to server, maybe wrong pass')
    except Exception as err:
        desc = str(err) or 'i just dont know what went wrong'
        raise IOError('Network error: {}'.format(desc))
def get_absolute_interval(conf):
    """Convert the relative day offsets from the config into a pair of
    absolute UTC datetimes, shrunk by one second at each end so that the
    boundaries fall strictly inside the requested days."""
    rel = conf['filter']['interval']
    today = set_localtime_midnight(datetime.utcnow(),
                                   conf['display']['timezone'])
    endpoints = sorted(today + timedelta(days=days) for days in rel[:2])
    one_second = timedelta(seconds=1)
    return (endpoints[0] + one_second, endpoints[1] - one_second)
def set_localtime_midnight(utc_time, local_tz):
    """Return, expressed in UTC, the local midnight of the ``local_tz``
    day that contains ``utc_time`` (a naive UTC datetime)."""
    aware_utc = pytz.utc.localize(utc_time)
    local = local_tz.normalize(aware_utc.astimezone(local_tz))
    local_midnight = local.replace(hour=0, minute=0,
                                   second=0, microsecond=0)
    return pytz.utc.normalize(local_midnight.astimezone(pytz.utc))
def is_in_display_list(calendar, display_list):
    """Return True when ``calendar`` should be shown: an empty filter
    list means "show everything", otherwise the calendar's string form
    must contain one of the configured substrings."""
    if not display_list:
        return True
    name = unicode(calendar)
    return any(needle in name for needle in display_list)
def parse_event(caldav_event, timezone):
    """Extract start, end and summary from a CalDAV event, localizing
    both timestamps into ``timezone``."""
    raw = caldav_event.get_data()
    vevent = vobject.readOne(StringIO.StringIO(raw)).vevent
    return {
        'start': localize(vevent.dtstart.value, timezone),
        'end': localize(vevent.dtend.value, timezone),
        'summary': vevent.summary.value
    }
def localize(date_or_time, local_tz):
    """Coerce a date or datetime into an aware datetime in ``local_tz``.
    Bare dates (all-day events) are interpreted as local midnight."""
    if isinstance(date_or_time, datetime):
        return local_tz.normalize(date_or_time.astimezone(local_tz))
    return local_tz.localize(datetime(
        year=date_or_time.year,
        month=date_or_time.month,
        day=date_or_time.day,
        hour=0, minute=0, second=0))
def filter_out_jet_lagged_events(events, interval):
    """Keep only the events that overlap the closed ``interval``:
    an event stays when it ends at/after the lower bound and starts
    at/before the upper bound."""
    lower, upper = interval
    kept = []
    for event in events:
        if event['end'] >= lower and event['start'] <= upper:
            kept.append(event)
    return kept
def display_events(events, conf):
    """Print each event using the configured format string, or a
    placeholder line when there is nothing to show.  A format referring
    to an unknown event key is reported as a config error."""
    if not events:
        uprint("No events")
        return
    line_format = conf['display']['event_format']
    try:
        for event in events:
            uprint(line_format.format(**event))
    except KeyError as err:
        raise ValueError('Wrong event_format line in conf file, at {}'
                         .format(err))
def uprint(str):
    """Print the argument encoded as UTF-8 bytes (console-safe under
    Python 2).  NOTE(review): the parameter shadows the builtin ``str``;
    the name is kept so keyword callers are unaffected."""
    encoded = unicode(str).encode('utf-8', errors='ignore')
    print(encoded)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "9d17f31dcf63885f7c56b891cbf59cda",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 29.651063829787233,
"alnum_prop": 0.5917049368541906,
"repo_name": "daladno/upcoming",
"id": "8c6780341a2826677fb45a9ee2eaa58b9b9e75b9",
"size": "6969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upcoming.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6969"
},
{
"name": "Shell",
"bytes": "478"
}
],
"symlink_target": ""
} |
from .test_citizens_crud import * | {
"content_hash": "4db7034820ab64df3846a2b0b25afe29",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 33,
"alnum_prop": 0.7878787878787878,
"repo_name": "jaconsta/soat_cnpx",
"id": "f5cb515d4c62b9919c1cad6b48cf4ca9212cec9e",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/citizens/tests_cases/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30418"
},
{
"name": "Shell",
"bytes": "1167"
}
],
"symlink_target": ""
} |
"""
gargoyle.admin
~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib import admin
from gargoyle.models import Switch
class SwitchAdmin(admin.ModelAdmin):
    """Django admin configuration for gargoyle's ``Switch`` model:
    shows label/key/status columns, filters by status, and allows
    searching over label, key and value."""
    list_display = ('label', 'key', 'status')
    list_filter = ('status',)
    search_fields = ('label', 'key', 'value')
# Make Switch manageable through the Django admin site.
admin.site.register(Switch, SwitchAdmin)
| {
"content_hash": "97bb5f90a679c389b504a6f69c59c081",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 23.095238095238095,
"alnum_prop": 0.688659793814433,
"repo_name": "YPlan/gargoyle",
"id": "4b7eff5bba922fe8ae267b329b419e742e395cc3",
"size": "485",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gargoyle/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6567"
},
{
"name": "HTML",
"bytes": "13316"
},
{
"name": "JavaScript",
"bytes": "8010"
},
{
"name": "Python",
"bytes": "140735"
}
],
"symlink_target": ""
} |
"""
Factory for input methods within a dialog.
"""
from datetime import datetime
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.gui.user.common.util import extractPyObject
from datafinder.gui.user.common.widget.property.editors.list_editor import ListEditor
from datafinder.gui.user.common.widget.property.editors.text_editor import TextEditor
__version__ = "$Revision-Id:$"
class EditorFactory(object):
    """
    This factory produces adequate widgets for editing Qt data types.

    A dispatch table maps DataFinder property-type constants to creation
    methods; unknown types fall back to a disabled text editor.
    """
    def __init__(self):
        """ Constructor. """
        # Dispatch table: property type constant -> editor factory method.
        self._handlingMethods = {constants.STRING_TYPE: self._createStringEditor,
                                 constants.DATETIME_TYPE: self._createDateTimeEditor,
                                 constants.NUMBER_TYPE: self._createDecimalEditor,
                                 constants.LIST_TYPE: self._createListEditor,
                                 constants.BOOLEAN_TYPE: self._createBooleanEditor}
    # NOTE(review): ``restrictions=dict()`` is a shared mutable default;
    # harmless here because it is never mutated and is re-defaulted below,
    # but ``None`` would be the safer convention.
    def createEditor(self, parent, objectType, restrictions=dict(), initState=None):
        """
        Creates an editor for given object and initializes it correctly.
        @param parent: parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        @param objectType: string that specifies the editor to create
        @type object: C{unicode}
        @param restrictions: restrictions that should be set for the editor
        @type restrictions: C{dict}: string -> int or string
        @param initState: a valid init state for an editor (f.e. string for a line edit)
        @return: An initialized editor widget
        @rtype: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        restrictions = restrictions or dict()
        try:
            editor = self._handlingMethods[unicode(objectType)](restrictions, initState, parent)
        except TypeError:
            # Presumably the handler rejected the given init state; retry
            # without initial data — TODO confirm which handlers trigger this.
            editor = self._handlingMethods[unicode(objectType)](restrictions, None, parent)
        except KeyError:
            # Unknown type: show the value read-only in a text editor.
            editor = self._createStringEditor(restrictions, initState, parent)
            editor.setEnabled(False)
        return editor
    def _createStringEditor(self, restriction, initData, parent):
        """
        Creates an adequate editor for text.
        @param restriction: restriction for the input
        @type restriction: C{dict}
        @param parent: parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        editor = TextEditor(initData, parent)
        if constants.OPTIONS in restriction:
            # NOTE(review): the TextEditor built above is discarded (but
            # still parented to ``parent``) when options are restricted.
            options = [QtCore.QString(item) for item in restriction[constants.OPTIONS]]
            editor = self._createSelectionBox(None, options, parent)
            return editor
        if constants.MAXIMUM_LENGTH in restriction:
            editor.setMaxLength(restriction[constants.MAXIMUM_LENGTH])
        if constants.PATTERN in restriction:
            # Restrict input to the configured regular expression.
            regEx = QtCore.QString(unicode(restriction[constants.PATTERN]))
            regExValidator = QtGui.QRegExpValidator(editor)
            regExValidator.setRegExp(QtCore.QRegExp(regEx))
            editor.setValidator(regExValidator)
        return editor
    @staticmethod
    def _createSelectionBox(_, listItems, parent):
        """
        Creates a combination box.
        @param listItems: items of the combination box
        @type listItems: C{list} of QString
        @param parent: Parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        editor = QtGui.QComboBox(parent)
        editor.addItems(listItems)
        return editor
    @staticmethod
    def _createDateTimeEditor(restriction, _, parent):
        """
        Creates an adequate editor for a date/time value.
        @param restriction: restriction for the input
        @type restriction: C{dict}
        @param parent: Parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        editor = QtGui.QDateTimeEdit(parent)
        editor.setCalendarPopup(True)
        if constants.MINIMUM_VALUE in restriction:
            editor.setMinimumDateTime(restriction[constants.MINIMUM_VALUE])
        if constants.MAXIMUM_VALUE in restriction:
            editor.setMaximumDateTime(restriction[constants.MAXIMUM_VALUE])
        # Default to "now"; set after the bounds so it gets clamped by Qt.
        editor.setDateTime(datetime.now())
        return editor
    @staticmethod
    def _createDecimalEditor(restriction, _, parent):
        """
        Creates an adequate editor for a decimal
        @param restriction: restriction for the input
        @type restriction: C{dict}
        @param parent: Parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        editor = QtGui.QDoubleSpinBox(parent)
        if constants.MINIMUM_VALUE in restriction:
            editor.setMinimum(restriction[constants.MINIMUM_VALUE])
        if constants.MAXIMUM_VALUE in restriction:
            editor.setMaximum(restriction[constants.MAXIMUM_VALUE])
        if constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES in restriction:
            editor.setDecimals(restriction[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES])
        else:
            # No explicit precision restriction: allow a wide range.
            editor.setDecimals(12)
            editor.setRange(-9999999999, 9999999999)
        return editor
    def _createListEditor(self, restrictions, listItems, parent=None):
        """
        Creates an adequate editor for a list
        @param restrictions: restriction for the input
        @type restrictions: C{dict}
        @param listItems: values to be edited
        @type listItems: C{list} of plain python types
        @param parent: Parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        listItems = listItems or list()
        # The list editor re-uses this factory to build per-item editors.
        return ListEditor(restrictions, self, listItems, parent)
    @staticmethod
    def _createBooleanEditor(_, __, parent):
        """
        Creates an adequate editor for a boolean value
        @param parent: Parent of the new editor
        @type parent: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        checkbox = QtGui.QCheckBox(parent)
        checkbox.setAutoFillBackground(True)
        return checkbox
    @staticmethod
    def getValueFromEditor(editor):
        """
        Returns the current value of the editor
        (None when a line edit is empty or the widget type is unknown).
        @param editor: Editor to get value from
        @type editor: L{QWidget<PyQt4.QtGui.QWidget>}
        """
        returnValue = None
        if type(editor) == QtGui.QDateTimeEdit:
            returnValue = editor.dateTime()
        elif type(editor) == QtGui.QDoubleSpinBox:
            returnValue = editor.value()
        elif type(editor) == QtGui.QCheckBox:
            returnValue = editor.isChecked()
        elif type(editor) == QtGui.QComboBox:
            returnValue = editor.currentText()
        elif type(editor) == ListEditor:
            returnValue = editor.value
        elif isinstance(editor, QtGui.QLineEdit):
            # Empty text is reported as None, not as an empty string.
            currentText = editor.text()
            if len(currentText) > 0:
                returnValue = currentText
        return returnValue
    @staticmethod
    def setEditorValue(editor, value):
        """
        Sets the value of the editor.
        A None value (after conversion) and widgets rejecting the value
        type are both ignored silently.
        """
        value = extractPyObject(value)
        if value is None:
            return value
        try:
            if isinstance(editor, QtGui.QLineEdit):
                editor.setText(value)
            elif type(editor) == QtGui.QDateTimeEdit:
                editor.setDateTime(value)
            elif type(editor) == QtGui.QDoubleSpinBox:
                editor.setValue(value)
            elif type(editor) == QtGui.QCheckBox:
                editor.setChecked(value)
        except TypeError:
            # Value incompatible with the widget: leave the editor as-is.
            return
| {
"content_hash": "eff8d23f01eef79275d871e951fdc585",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 96,
"avg_line_length": 37.57339449541284,
"alnum_prop": 0.5979733854230252,
"repo_name": "DLR-SC/DataFinder",
"id": "743d87f0f4f42979b21bc54ad6dfed02dad9c075",
"size": "9885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/gui/user/common/widget/property/editors/factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
import os
import fixtures
from oslo.config import cfg
import inspect
from nova import exception
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova import unit
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
    """Shared fixture plus common cache/preallocation tests for the
    libvirt image backends.  Concrete subclasses set ``image_class``
    (and may override paths) in their own ``setUp``.  Tests use mox
    record/replay, so statement order mirrors the expected call order."""
    INSTANCES_PATH = '/instances_path'
    def mock_create_image(self, image):
        """Replace ``image.create_image`` so the fetch function is just
        invoked against the backing template instead of building a disk."""
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image
    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.flags(disable_process_locking=True,
                   instances_path=self.INSTANCES_PATH)
        self.INSTANCE = {'name': 'instance',
                         'uuid': uuidutils.generate_uuid()}
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.OLD_STYLE_INSTANCE_PATH = \
            fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
        self.PATH = os.path.join(
            fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    def test_cache(self):
        """Fetch runs and the base dir is created when nothing exists yet."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_image_exists(self):
        """No fetch happens when the disk image is already in place."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_base_dir_exists(self):
        """Fetch runs but ensure_tree is not expected when _base exists."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_cache_template_exists(self):
        """Cache proceeds straight to create when the template is cached."""
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        os.path.exists(self.PATH).AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """With preallocate_images=space, fallocate runs once per cache."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: True)
        # Call twice to verify testing fallocate is only called once.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
    def test_prealloc_image_without_write_access(self):
        """fallocate is skipped when the image path is not writable."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        self.stubs.Set(image, '_can_fallocate', lambda: True)
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(os, 'access', lambda p, w: False)
        # Testing fallocate is only called when user has write access.
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Raw image backend: images are plain copies of the
    cached template, optionally extended to the requested size."""
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Raw
        super(RawTestCase, self).setUp()
        # correct_format touches the filesystem; neutralize it by default.
        self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
    def prepare_mocks(self):
        """Stub the collaborators every create_image test records against."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_create_image(self):
        """Template is fetched and copied; no resize without a size."""
        fn = self.prepare_mocks()
        fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
        self.mox.VerifyAll()
    def test_create_image_generated(self):
        """A generated image is written straight to the instance path."""
        fn = self.prepare_mocks()
        fn(target=self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def test_create_image_extend(self):
        """Copied image is extended (non-cow) to the requested size."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
        self.mox.VerifyAll()
    def test_correct_format(self):
        """driver_format is derived from qemu-img inspection of the file."""
        # NOTE(review): this first assignment is redundant — ``info`` is
        # rebound a few lines below before first use.
        info = self.mox.CreateMockAnything()
        self.stubs.UnsetAll()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
        os.path.exists(self.PATH).AndReturn(True)
        info = self.mox.CreateMockAnything()
        info.file_format = 'foo'
        imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')
        self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Qcow2 backend: images are COW overlays on top of the
    cached template, with resized backing files generated on demand."""
    SIZE = unit.Gi
    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Name of the per-size resized copy of the template backing file.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / unit.Gi))
    def prepare_mocks(self):
        """Stub the collaborators every create_image test records against."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend.utils.synchronized,
                                 '__call__')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'create_cow_image')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
        self.mox.StubOutWithMock(imagebackend.disk, 'extend')
        return fn
    def test_create_image(self):
        """A COW overlay is created over the fetched template."""
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def test_create_image_with_size(self):
        """Overlay is created and then extended (cow) to the target size."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(False)
        imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
                                                    self.PATH)
        imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_too_small(self):
        """Requesting a size below the template's raises FlavorDiskTooSmall."""
        fn = self.prepare_mocks()
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.FlavorDiskTooSmall,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        self.mox.VerifyAll()
    def test_generate_resized_backing_files(self):
        """A missing resized backing file is copied and extended in place."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(self.QCOW2_BASE)
        os.path.exists(self.QCOW2_BASE).AndReturn(False)
        imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
                                              self.QCOW2_BASE)
        imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_qcow2_exists_and_has_no_backing_file(self):
        """An existing overlay without a backing file is left untouched."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(imagebackend.libvirt_utils,
                                 'get_disk_backing_file')
        if self.OLD_STYLE_INSTANCE_PATH:
            os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
        os.path.exists(self.PATH).AndReturn(True)
        imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
            .AndReturn(None)
        os.path.exists(self.PATH).AndReturn(True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm backend: images live as logical volumes in a
    volume group and are filled via qemu-img convert, with optional
    sparse allocation and filesystem resize."""
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(images_volume_group=self.VG, group='libvirt')
        self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
        # LVM paths are always new-style; disable the old-path check.
        self.OLD_STYLE_INSTANCE_PATH = None
        self.PATH = os.path.join('/dev', self.VG, self.LV)
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.libvirt_utils = imagebackend.libvirt_utils
    def prepare_mocks(self):
        """Stub the collaborators every create_image test records against."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(self.disk, 'resize2fs')
        self.mox.StubOutWithMock(self.libvirt_utils, 'create_lvm_image')
        self.mox.StubOutWithMock(self.disk, 'get_disk_size')
        self.mox.StubOutWithMock(self.utils, 'execute')
        return fn
    def _create_image(self, sparse):
        """Common flow: LV sized to the template, filled via qemu-img."""
        fn = self.prepare_mocks()
        fn(max_size=None, target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.TEMPLATE_SIZE,
                                            sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        self.mox.VerifyAll()
    def _create_image_generated(self, sparse):
        """Common flow: generated image written directly into the LV."""
        fn = self.prepare_mocks()
        self.libvirt_utils.create_lvm_image(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        fn(target=self.PATH, ephemeral_size=None)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)
        self.mox.VerifyAll()
    def _create_image_resize(self, sparse):
        """Common flow: LV at target size, converted then resize2fs'd."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
               self.PATH)
        self.utils.execute(*cmd, run_as_root=True)
        self.disk.resize2fs(self.PATH, run_as_root=True)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image(self):
        self._create_image(False)
    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)
    def test_create_image_generated(self):
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)
    def test_create_image_resize(self):
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)
    def test_create_image_negative(self):
        """LV creation failure triggers cleanup and re-raises the error."""
        fn = self.prepare_mocks()
        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.SIZE,
                                            sparse=False
                                            ).AndRaise(RuntimeError())
        self.disk.get_disk_size(self.TEMPLATE_PATH
                                ).AndReturn(self.TEMPLATE_SIZE)
        self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
        self.libvirt_utils.remove_logical_volumes(self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        self.mox.VerifyAll()
    def test_create_image_generated_negative(self):
        """Generator failure triggers LV cleanup and re-raises the error."""
        fn = self.prepare_mocks()
        fn(target=self.PATH,
           ephemeral_size=None).AndRaise(RuntimeError())
        self.libvirt_utils.create_lvm_image(self.VG,
                                            self.LV,
                                            self.SIZE,
                                            sparse=False)
        self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
        self.libvirt_utils.remove_logical_volumes(self.PATH)
        self.mox.ReplayAll()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        self.mox.VerifyAll()
    def test_prealloc_image(self):
        """LVM never fallocates: no processutils calls are logged."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Rbd (Ceph RADOS block device) image backend.

    NOTE(review): these tests use mox record/replay -- the order of the
    recorded expectations below is part of each test's contract.
    """

    # Fake Ceph connection parameters injected via self.flags() in setUp().
    POOL = "FakePool"
    USER = "FakeUser"
    CONF = "FakeConf"
    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(images_rbd_pool=self.POOL,
                   rbd_user=self.USER,
                   images_rbd_ceph_conf=self.CONF,
                   group='libvirt')
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        # stand-ins for the rbd/rados client modules
        self.rbd = self.mox.CreateMockAnything()
        self.rados = self.mox.CreateMockAnything()

    def prepare_mocks(self):
        """Stub the module-level rbd/rados imports; return a mock fetch fn."""
        fn = self.mox.CreateMockAnything()
        self.mox.StubOutWithMock(imagebackend, 'rbd')
        self.mox.StubOutWithMock(imagebackend, 'rados')
        return fn

    def test_cache(self):
        """Missing template dir and image: fetch runs and dir is created."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
        image.check_image_exists().AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_base_dir_exists(self):
        """Existing template dir: ensure_tree must NOT be called."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_image_exists(self):
        """Image already present in rbd: no fetch function is invoked."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(True)
        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
        self.mox.ReplayAll()

        image.cache(None, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_cache_template_exists(self):
        """Template already on disk: fetch runs but no dir is created."""
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(image, 'check_image_exists')
        os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
        image.check_image_exists().AndReturn(False)
        fn = self.mox.CreateMockAnything()
        fn(target=self.TEMPLATE_PATH)
        self.mox.ReplayAll()

        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)

        self.mox.VerifyAll()

    def test_create_image(self):
        """create_image imports the fetched template into the rbd pool."""
        fn = self.prepare_mocks()
        fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH)
        self.rbd.RBD_FEATURE_LAYERING = 1

        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
                                        ).AndReturn(self.SIZE)
        # expected arguments for the rbd import command
        rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
        cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--new-format', '--id', self.USER,
               '--conf', self.CONF)
        self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
        self.mox.ReplayAll()

        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)

        self.mox.VerifyAll()

    def test_prealloc_image(self):
        """preallocate_images='space' must be a no-op for cached rbd images."""
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self.stubs)
        self.mox.StubOutWithMock(imagebackend, 'rbd')
        self.mox.StubOutWithMock(imagebackend, 'rados')
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            return

        # NOTE(review): fake_resize is defined but never installed/used
        def fake_resize(rbd_name, size):
            return

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(image, 'check_image_exists', lambda: True)

        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)

        # no shell commands should have been executed
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the base Image.libvirt_info signature."""
        self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
                         inspect.getargspec(self.image_class.libvirt_info))
class BackendTestCase(test.NoDBTestCase):
    """Verify that imagebackend.Backend maps each image_type / use_cow
    combination to the expected image class."""

    INSTANCE = {'name': 'fake-instance',
                'uuid': uuidutils.generate_uuid()}
    NAME = 'fake-name.suffix'

    def get_image(self, use_cow, image_type):
        """Build a backend image of *image_type*, honoring *use_cow*."""
        backend = imagebackend.Backend(use_cow)
        return backend.image(self.INSTANCE, self.NAME, image_type)

    def _test_image(self, image_type, image_not_cow, image_cow):
        """Check both the non-COW and COW variants resolve correctly."""
        non_cow_image = self.get_image(False, image_type)
        cow_image = self.get_image(True, image_type)

        def check_instance(obj, expected_class):
            message = ('Expected %s, but got %s.'
                       % (expected_class.__name__, obj.__class__.__name__))
            self.assertIsInstance(obj, expected_class, msg=message)

        check_instance(non_cow_image, image_not_cow)
        check_instance(cow_image, image_cow)

    def test_image_raw(self):
        self._test_image('raw', imagebackend.Raw, imagebackend.Raw)

    def test_image_qcow2(self):
        self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)

    def test_image_lvm(self):
        self.flags(images_volume_group='FakeVG', group='libvirt')
        self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)

    def test_image_rbd(self):
        rbd_conf = "FakeConf"
        rbd_pool = "FakePool"
        self.flags(images_rbd_pool=rbd_pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=rbd_conf, group='libvirt')
        self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)

    def test_image_default(self):
        # 'default' backend: raw without COW, qcow2 with COW
        self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
| {
"content_hash": "83dc396000818d4313009be7354355bb",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 78,
"avg_line_length": 38.232142857142854,
"alnum_prop": 0.61657325237428,
"repo_name": "sacharya/nova",
"id": "1d553218df386ca8f2220837b7cf3f6e86f063e4",
"size": "26366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/virt/libvirt/test_imagebackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *

# URL routes for the game app; the captured <op> path segment is passed to
# the 'question' view as a keyword argument.
# NOTE(review): django.conf.urls.defaults / patterns() only exist in legacy
# Django releases -- this module targets that old API.
urlpatterns = patterns('game.views',
        ('^question/(?P<op>.+)$','question'),
        )
| {
"content_hash": "76b37f07023459fbccad96af62c6755c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 23.8,
"alnum_prop": 0.6638655462184874,
"repo_name": "bernardokyotoku/skillplant",
"id": "e26d93413eee84d1d04a3c7c2e1769e1426a667a",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "103281"
},
{
"name": "Python",
"bytes": "4219238"
},
{
"name": "Shell",
"bytes": "500"
}
],
"symlink_target": ""
} |
import os,shutil
"""
A little script to wrap the params enum for use in Cython code
Ian Bell, Feb 2013
"""
def params_constants():
    """Generate Cython wrappers for the C++ ``enum params``.

    Reads ``GlobalConstants.h``, extracts the enum entry names, writes
    ``param_constants_header.pxd`` / ``param_constants.pyx`` and copies
    both generated files into the Python wrapper package.
    """
    import re

    fName = os.path.join('..', '..', 'CoolProp', 'GlobalConstants.h')
    with open(fName, 'r') as header_file:
        contents = header_file.read().replace('\n', '')

    # Locate the brace-delimited body of "enum params { ... }": find the
    # opening brace after the keyword, then the closing brace AFTER it.
    left = contents.find('{', contents.find('enum params'))
    # BUG FIX: the closing brace must be searched starting at `left`;
    # the old code searched from the beginning of the file, so any earlier
    # '}' in the header silently truncated the enum.
    right = contents.find('}', left)
    entries = contents[left + 1:right].split(',')

    # Strip C block comments from each entry and drop empties.
    entries = [re.sub(r"/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/", "", e.strip())
               for e in entries]
    entries = [e for e in entries if len(e) > 0]

    # Write the PXD definition file
    with open('param_constants_header.pxd', 'w') as pxd_output_file:
        pxd_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "CoolProp.h":\n\tenum params:\n')
        for param in entries:
            param = param.strip()
            pxd_output_file.write('\t\t' + param + '\n')

    # Write the PYX implementation file
    with open('param_constants.pyx', 'w') as pyx_output_file:
        pyx_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\ncimport param_constants_header\n\n')
        for param in entries:
            param = param.strip()
            pyx_output_file.write(param + ' = ' + 'param_constants_header.' + param + '\n')

    # Install the generated files into the Python wrapper package.
    shutil.copy2('param_constants_header.pxd', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'param_constants_header.pxd'))
    shutil.copy2('param_constants.pyx', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'param_constants.pyx'))
def phase_constants():
    """Generate Cython wrappers for the C++ ``enum phases``.

    Scans ``GlobalConstants.h`` for the single-line phases enum, writes
    ``phase_constants_header.pxd`` / ``phase_constants.pyx`` and copies
    both into the Python wrapper package.
    """
    fName = os.path.join('..', '..', 'CoolProp', 'GlobalConstants.h')
    with open(fName, 'r') as f:
        lines = f.readlines()

    # Find the line that looks like:
    #   enum phases {iLiquid, iSupercritical, iGas, iTwoPhase};
    the_line = None
    for line in lines:
        if line.find('enum phases') > -1:
            the_line = line.strip()
            break

    if the_line is None:
        # BUG FIX: the old message named the wrong enum and the wrong file
        # ('enum params ... CoolProp.h').
        raise ValueError('enum phases line was not found in GlobalConstants.h')
    else:
        # Turn "...{a, b};" into "...{a, b{;" so the entries sit between
        # two '{' marks and a single split extracts them.
        the_line = the_line.replace('}', '{')
        the_params = the_line.split('{')[1].split(',')

    # Write the PXD definition file
    with open('phase_constants_header.pxd', 'w') as pxd_output_file:
        pxd_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "CoolProp.h":\n\tenum phase:\n')
        for param in the_params:
            param = param.strip()
            pxd_output_file.write('\t\t' + param + '\n')

    # Write the PYX implementation file
    with open('phase_constants.pyx', 'w') as pyx_output_file:
        pyx_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\ncimport phase_constants_header\n\n')
        for param in the_params:
            param = param.strip()
            pyx_output_file.write(param + ' = ' + 'phase_constants_header.' + param + '\n')

    # Install the generated files into the Python wrapper package.
    shutil.copy2('phase_constants_header.pxd', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'phase_constants_header.pxd'))
    shutil.copy2('phase_constants.pyx', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'phase_constants.pyx'))
def unit_systems_constants():
    """Generate Cython wrappers for the C++ ``enum unit_systems``.

    Scans ``GlobalConstants.h`` for the single-line unit_systems enum,
    writes ``unit_systems_constants_header.pxd`` /
    ``unit_systems_constants.pyx`` and copies both into the Python
    wrapper package.
    """
    fName = os.path.join('..', '..', 'CoolProp', 'GlobalConstants.h')
    with open(fName, 'r') as f:
        lines = f.readlines()

    # Find the line that looks like: enum unit_systems {UNIT_SYSTEM_SI, ...};
    the_line = None
    for line in lines:
        if line.find('enum unit_systems') > -1:
            the_line = line.strip()
            break

    if the_line is None:
        # BUG FIX: the old message pointed at CoolProp.h although the file
        # actually scanned is GlobalConstants.h.
        raise ValueError('enum unit_systems line was not found in GlobalConstants.h')
    else:
        # Turn "...{a, b};" into "...{a, b{;" so a single split extracts
        # the comma-separated entries.
        the_line = the_line.replace('}', '{')
        the_params = the_line.split('{')[1].split(',')

    # Write the PXD definition file
    with open('unit_systems_constants_header.pxd', 'w') as pxd_output_file:
        pxd_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\n\ncdef extern from "CoolProp.h":\n\tenum unit_systems:\n')
        for param in the_params:
            param = param.strip()
            pxd_output_file.write('\t\t' + param + '\n')

    # Write the PYX implementation file
    with open('unit_systems_constants.pyx', 'w') as pyx_output_file:
        pyx_output_file.write('#This file is automatically generated by the generate_constants_module.py script in dev/scripts.\n#DO NOT MODIFY THE CONTENTS OF THIS FILE!\ncimport unit_systems_constants_header\n\n')
        for param in the_params:
            param = param.strip()
            pyx_output_file.write(param + ' = ' + 'unit_systems_constants_header.' + param + '\n')

    # Install the generated files into the Python wrapper package.
    shutil.copy2('unit_systems_constants_header.pxd', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'unit_systems_constants_header.pxd'))
    shutil.copy2('unit_systems_constants.pyx', os.path.join('..', '..', 'wrappers', 'Python', 'CoolProp', 'unit_systems_constants.pyx'))
if __name__=='__main__':
params_constants()
phase_constants()
unit_systems_constants() | {
"content_hash": "77d0f3bdee76ad2bdc37a31e4a11b055",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 230,
"avg_line_length": 48.87074829931973,
"alnum_prop": 0.6340478841870824,
"repo_name": "ibell/coolprop",
"id": "6476dfebd64164abe3ea915747dd75520d85472f",
"size": "7184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dev/scripts/generate_constants_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3838027"
},
{
"name": "C#",
"bytes": "37000"
},
{
"name": "C++",
"bytes": "3817643"
},
{
"name": "CSS",
"bytes": "10460"
},
{
"name": "FORTRAN",
"bytes": "2453"
},
{
"name": "Java",
"bytes": "11956"
},
{
"name": "M",
"bytes": "183"
},
{
"name": "Makefile",
"bytes": "25938"
},
{
"name": "Mathematica",
"bytes": "3306"
},
{
"name": "Matlab",
"bytes": "9575"
},
{
"name": "Objective-C",
"bytes": "6434"
},
{
"name": "PHP",
"bytes": "128"
},
{
"name": "Python",
"bytes": "991100"
},
{
"name": "Scilab",
"bytes": "774"
},
{
"name": "Shell",
"bytes": "30128"
},
{
"name": "TeX",
"bytes": "58925"
},
{
"name": "Visual Basic",
"bytes": "18659"
}
],
"symlink_target": ""
} |
# Human-readable description of this application, exposed as module metadata.
__about__ = """
In addition to what is provided by the "zero" project, this project
provides thorough integration with django-user-accounts, adding
comprehensive account management functionality. It is a foundation
suitable for most sites that have user accounts.
"""

# Dotted path to the AppConfig Django's app registry should use for this
# package (legacy default_app_config convention).
default_app_config = "testapp.apps.AppConfig"
| {
"content_hash": "2f22244f2db451e57c7c9b5888eba627",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 39.375,
"alnum_prop": 0.7841269841269841,
"repo_name": "kevana/django-test",
"id": "8d96b22a34d5a6bd0d2f93d2716d959cac523eab",
"size": "340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testapp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27656"
},
{
"name": "HTML",
"bytes": "4751"
},
{
"name": "Makefile",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "8736"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Runtime dependencies for the package.
# NOTE(review): 'py' and 'pytest' are test-only tools pinned inside
# install_requires, so every consumer installs them; consider moving them
# to a test extra -- confirm against the project's CI setup first.
requirements = ['lz4tools==1.3.1.2',
                'numpy',
                'py==1.4.31',
                'pytest==3.0.3']

# Package metadata and build configuration for the slisonner utility.
setup(name='slisonner',
      version='0.7.9',
      description='Habidatum Chronotope Slison encode/decode utility',
      long_description='',
      author='Nikita Pestrov',
      author_email='nikita.pestrov@habidatum.com',
      maintainer='Nikita Pestrov',
      maintainer_email='nikita.pestrov@habidatum.com',
      packages=find_packages(),
      install_requires=requirements,
      platforms='any',
      classifiers=['Programming Language :: Python :: 3.4'])
| {
"content_hash": "efd2f29f1e575657e80225c40478ea4e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 29.7,
"alnum_prop": 0.6632996632996633,
"repo_name": "Habidatum/slisonner",
"id": "12953800b5b6ce796066391662bc4943c6f2fee9",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6387"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# BEWARE don't put anything in this file except exceptions
from werkzeug.exceptions import NotFound
from MySQLdb import ProgrammingError as SQLError, Error
from MySQLdb import OperationalError as DatabaseOperationalError
# Base exception types; each carries the HTTP status code the web layer
# should respond with when the exception escapes a request handler.
# NOTE(review): PermissionError and NameError shadow Python builtins of the
# same name within this module -- intentional here, but import carefully.

class ValidationError(Exception):
    # 417 Expectation Failed
    http_status_code = 417

class AuthenticationError(Exception):
    # 401 Unauthorized
    http_status_code = 401

class SessionExpired(Exception):
    # 401 Unauthorized
    http_status_code = 401

class PermissionError(Exception):
    # 403 Forbidden
    http_status_code = 403

class DoesNotExistError(ValidationError):
    # 404 Not Found
    http_status_code = 404

class NameError(Exception):
    # 409 Conflict (e.g. name already taken)
    http_status_code = 409

class OutgoingEmailError(Exception):
    # 501 Not Implemented
    http_status_code = 501

class SessionStopped(Exception):
    # 503 Service Unavailable
    http_status_code = 503

class UnsupportedMediaType(Exception):
    # 415 Unsupported Media Type
    http_status_code = 415

class RequestToken(Exception):
    # 200 OK -- used as a control-flow signal, not an error
    http_status_code = 200

class Redirect(Exception):
    # 301 Moved Permanently
    http_status_code = 301

class CSRFTokenError(Exception):
    # 400 Bad Request
    http_status_code = 400
class ImproperDBConfigurationError(Error):
    """Raised when frappe detects that the database or its tables are not
    configured correctly.

    The original low-level cause is preserved on ``self.reason``.
    """
    def __init__(self, reason, msg=None):
        # Fall back to a generic message when none (or an empty one) is given.
        effective_msg = msg or "MariaDb is not properly configured"
        super(ImproperDBConfigurationError, self).__init__(effective_msg)
        self.reason = reason
# Concrete error types.  Each one-liner inherits its HTTP status code from
# its base class (ValidationError -> 417, NameError -> 409, etc.).
class DuplicateEntryError(NameError):pass
class DataError(ValidationError): pass
class UnknownDomainError(Exception): pass
class MappingMismatchError(ValidationError): pass
class InvalidStatusError(ValidationError): pass
class MandatoryError(ValidationError): pass
class InvalidSignatureError(ValidationError): pass
class RateLimitExceededError(ValidationError): pass
class CannotChangeConstantError(ValidationError): pass
class CharacterLengthExceededError(ValidationError): pass
class UpdateAfterSubmitError(ValidationError): pass
class LinkValidationError(ValidationError): pass
class CancelledLinkError(LinkValidationError): pass
class DocstatusTransitionError(ValidationError): pass
class TimestampMismatchError(ValidationError): pass
class EmptyTableError(ValidationError): pass
class LinkExistsError(ValidationError): pass
class InvalidEmailAddressError(ValidationError): pass
class TemplateNotFoundError(ValidationError): pass
class UniqueValidationError(ValidationError): pass
class AppNotInstalledError(ValidationError): pass
# IncorrectSitePath maps to HTTP 404 via werkzeug's NotFound base class.
class IncorrectSitePath(NotFound): pass
class ImplicitCommitError(ValidationError): pass
class RetryBackgroundJobError(Exception): pass
class DocumentLockedError(ValidationError): pass
| {
"content_hash": "f7a2b8a97365ca00207db84ad9d2959c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 66,
"avg_line_length": 30.240963855421686,
"alnum_prop": 0.8262948207171315,
"repo_name": "bohlian/frappe",
"id": "ae9fca7e7ae7283096d6c850817576a26703d523",
"size": "2611",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "406369"
},
{
"name": "HTML",
"bytes": "213728"
},
{
"name": "JavaScript",
"bytes": "1741213"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "1965275"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
class Interpolator(object):
    """Abstract base class for interpolation engines.

    Subclasses implement the interpolant computations and ``_exit``.
    Instances are usable as context managers: leaving a ``with`` block
    releases the solver's resources via :meth:`exit`.
    """

    def __init__(self):
        # True once exit() has successfully released resources.
        self._destroyed = False

    def binary_interpolant(self, a, b):
        """Return a binary interpolant for (a, b) when And(a, b) is
        unsatisfiable, or None when it is satisfiable."""
        raise NotImplementedError

    def sequence_interpolant(self, formulas):
        """Return a sequence interpolant for the conjunction of *formulas*,
        or None when the problem is satisfiable."""
        raise NotImplementedError

    def __enter__(self):
        """Enter a with-block, yielding the interpolator itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """On leaving a with-block, explicitly destroy the interpolator to
        free its associated resources."""
        self.exit()

    def exit(self):
        """Destroy the solver and close associated resources (idempotent)."""
        if self._destroyed:
            return
        self._exit()
        self._destroyed = True

    def _exit(self):
        """Subclass hook that performs the actual resource release."""
        raise NotImplementedError
| {
"content_hash": "88de45f1a6933cd82679e564b4aa5da2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 29.317073170731707,
"alnum_prop": 0.6064891846921797,
"repo_name": "agriggio/pysmt",
"id": "2f06e30c702fe7b88ad40fb395fee462a5fe33a7",
"size": "1853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysmt/solvers/interpolation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1019481"
},
{
"name": "Shell",
"bytes": "6094"
}
],
"symlink_target": ""
} |
import os
import sys
import math
import PyPR2
# Relative path to the S-PR2 (Magiks) PR2 project tree probed by
# IKSResolver.resolveIKS().
MagiksPR2Path = 'Magiks/magiks/projects/s_pr2'
class IKSError( Exception ):
  # Raised for invalid IKS requests or when no IKS solver is available.
  pass
class IKSResolver( object ):
def __init__( self ):
self.spr2_obj = None
self.iks_in_use = 0
self.np = None
self.pint = None
self.geometry = None
self.resolveIKS()
self.useSPR2()
def getArmPose( self, left_arm ):
if self.iks_in_use == 2:
self.spr2_obj.sync_object()
pos = None
orient = None
if left_arm:
pos = tuple(self.spr2_obj.larm_end_position())
orient = self.geometry.Orientation_3D( self.spr2_obj.larm_end_orientation(), representation = 'matrix' )
else:
pos = tuple(self.spr2_obj.rarm_end_position())
orient = self.geometry.Orientation_3D( self.spr2_obj.rarm_end_orientation(), representation = 'matrix' )
pose = {'position': pos, 'orientation': tuple(orient.quaternion())}
return pose
else:
if left_arm:
return PyPR2.getRelativeTF( '/base_footprint', '/l_gripper_tool_frame' )
else:
return PyPR2.getRelativeTF( '/base_footprint', '/r_gripper_tool_frame' )
def moveArmInTrajectory( self, traj, time = 10.0, left_arm = False, relative = False ):
if self.iks_in_use != 2:
raise IKSError( 'This function is only available with S-PR2.' )
if time <= 1.0:
raise IKSError( 'Invalid execution time.' )
if not isinstance( traj, list ) or len( traj ) == 0:
raise IKSError( 'Input trajectory must be a non-empty list of pose (dictionary)' )
pos_traj = self.traj.Trajectory_Polynomial()
orient_traj = self.traj.Orientation_Trajectory_Polynomial()
for idx, pose in enumerate(traj):
if not isinstance( pose, dict ) or not pose.has_key( 'position' ) or not isinstance(pose['position'], tuple) or len(pose['position']) != 3:
print 'invalid pose position at {0}'.format( idx )
else:
pos_traj.add_point(phi = float(idx), pos = self.np.array(pose['position']))
# optional
if pose.has_key( 'orientation' ) and isinstance(pose['orientation'], tuple) and len(pose['orientation']) == 4:
orient_traj.add_point(phi = float(idx), ori = self.geometry.Orientation_3D( pose['orientation'], representation = 'quaternion' ) )
#pos_traj.consistent_velocities()
if left_arm:
jt = self.spr2_obj.larm.project_to_js( pos_traj, orient_traj, relative = relative )
pos_traj.consistent_velocities()
jt.consistent_velocities()
self.spr2_obj.run_config_trajectory(jt, is_left_arm = True, duration = time)
else:
jt = self.spr2_obj.rarm.project_to_js( pos_traj, orient_traj, relative = relative )
jt.consistent_velocities()
self.spr2_obj.run_config_trajectory(jt, is_left_arm = False, duration = time)
def moveArmWithSPR2( self, **kwargs ):
if not self.spr2_obj:
return False
if not kwargs.has_key( 'position' ) or not kwargs.has_key( 'orientation' ) or not kwargs.has_key( 'use_left_arm'):
print 'Invalid input argument'
return False
self.spr2_obj.arm_speed = 0.1
if kwargs.has_key( 'wait' ):
wait = kwargs['wait']
else:
wait = True
if kwargs['use_left_arm']:
self.spr2_obj.larm_reference = True
else:
self.spr2_obj.larm_reference = False
self.spr2_obj.sync_object()
arm_orient = self.geometry.Orientation_3D( kwargs['orientation'], representation = 'quaternion' )
self.spr2_obj.set_target( self.np.array(kwargs['position']), arm_orient.matrix() )
return self.spr2_obj.arm_target(wait = wait)
def dummyMoveArmTo( self, wait = True, **kwargs ):
raise IKSError( 'NO IKS solver is available to PyRIDE' )
def resetMotionCallbacks( self ):
if self.iks_in_use == 2:
self.pint.set_callback_functions()
def resolveIKS( self ):
PyPR2.moveArmTo = self.dummyMoveArmTo
PyPR2.getArmPose = self.getArmPose
PyPR2.moveArmInTrajectory = self.moveArmInTrajectory
iksPath = os.path.join( sys.path[0], MagiksPR2Path )
if os.path.exists( iksPath ):
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/python2.7/dist-packages/')
sys.path.append('/usr/lib/pymodules/python2.7')
sys.path.append(iksPath)
try:
import initialize
from magiks.specific_geometries.pr2 import pyride_synchronizer as pys
import numpy as np
from math_tools.geometry import geometry
from math_tools.geometry import trajectory as traj
self.np = np
self.pint = pys.pint
self.geometry = geometry
self.spr2_obj = pys.PyRide_PR2()
self.traj = traj
except:
print 'unable to load S-PR2/Magiks engine'
self.spr2_obj = None
def useSPR2( self ):
if self.spr2_obj:
PyPR2.moveArmTo = self.moveArmWithSPR2
self.iks_in_use = 2
print 'PyRIDE is using S-PR2 for PR2'
def useMoveIt( self ):
if PyPR2.useMoveIt():
PyPR2.moveArmTo = PyPR2.moveArmPoseTo
self.iks_in_use = 1
print 'PyRIDE is using MoveIt! for PR2'
def iksInUse( self ):
if self.iks_in_use == 1:
return 'MoveIt!'
elif self.iks_in_use == 2:
return 'S-PR2'
else:
return 'None'
| {
"content_hash": "e62b60eb16defa8002d1030affecce6f",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 145,
"avg_line_length": 34.86754966887417,
"alnum_prop": 0.6429249762583096,
"repo_name": "uts-magic-lab/pyride_pr2",
"id": "32415c3f3f267d36d69d7d0d5d5ea8b356ce21b3",
"size": "5265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/iksresolver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20404"
},
{
"name": "C++",
"bytes": "187071"
},
{
"name": "CMake",
"bytes": "6566"
},
{
"name": "Python",
"bytes": "137590"
}
],
"symlink_target": ""
} |
import time
from ginga.misc import Bunch, Datasrc, Callback, Future, Settings
from ginga.util import viewer as gviewer
class ChannelError(Exception):
    """Raised for channel-related errors in the reference viewer."""
class Channel(Callback.Callbacks):
"""Class to manage a channel.
Parameters
----------
name : str
Name of the channel.
fv : `~ginga.rv.Control.GingaShell`
The reference viewer shell.
settings : `~ginga.misc.Settings.SettingGroup`
Channel settings.
datasrc : `~ginga.misc.Datasrc.Datasrc`
Data cache.
"""
def __init__(self, name, fv, settings, datasrc=None):
super(Channel, self).__init__()
self.logger = fv.logger
self.fv = fv
self.settings = settings
self.logger = fv.logger
# CHANNEL ATTRIBUTES
self.name = name
self.widget = None
self.container = None
self.workspace = None
self.opmon = None
# this is the image viewer we are connected to
self.fitsimage = None
# this is the currently active viewer
self.viewer = None
self.viewers = []
self.viewer_dict = {}
if datasrc is None:
num_images = self.settings.get('numImages', 1)
datasrc = Datasrc.Datasrc(num_images)
self.datasrc = datasrc
self.cursor = -1
self.history = []
self.image_index = {}
# external entities can attach stuff via this attribute
self.extdata = Bunch.Bunch()
self._configure_sort()
self.settings.get_setting('sort_order').add_callback(
'set', self._sort_changed_ext_cb)
def connect_viewer(self, viewer):
if viewer not in self.viewers:
self.viewers.append(viewer)
self.viewer_dict[viewer.vname] = viewer
    def move_image_to(self, imname, channel):
        """Move image *imname* into *channel*: copy it there, then remove
        it from this channel.  No-op when the target is this channel."""
        if self == channel:
            return

        self.copy_image_to(imname, channel)
        self.remove_image(imname)
    def copy_image_to(self, imname, channel, silent=False):
        """Copy image *imname* from this channel into *channel*.

        Transfers the bookkeeping record and, when the image data is
        still cached here, the data as well.  No-op when the target is
        this channel or already has an image of that name.
        """
        if self == channel:
            return

        if imname in channel:
            # image with that name is already there
            return

        # transfer image info
        info = self.image_index[imname]
        was_not_there_already = channel._add_info(info)

        try:
            image = self.datasrc[imname]

        except KeyError:
            # data no longer cached here; only the info was transferred
            return

        if was_not_there_already:
            channel.datasrc[imname] = image

            if not silent:
                self.fv.gui_do(channel.add_image_update, image, info,
                               update_viewer=False)
def remove_image(self, imname):
info = self.image_index[imname]
self.remove_history(imname)
if imname in self.datasrc:
image = self.datasrc[imname]
self.datasrc.remove(imname)
# update viewer if we are removing the currently displayed image
cur_image = self.viewer.get_dataobj()
if cur_image == image:
self.refresh_cursor_image()
self.fv.make_async_gui_callback('remove-image', self.name,
info.name, info.path)
return info
def get_image_names(self):
return [info.name for info in self.history]
def get_loaded_image(self, imname):
"""Get an image from memory.
Parameters
----------
imname : str
Key, usually image name and extension.
Returns
-------
image
Image object.
Raises
------
KeyError
Image is not in memory.
"""
image = self.datasrc[imname]
return image
    def add_image(self, image, silent=False, bulk_add=False):
        """Add *image* to this channel's data cache and history.

        Raises ValueError when the image has no 'name'.  With silent=True
        no callbacks/viewer updates happen; with bulk_add=True the viewer
        update is deferred (the caller updates once per batch).
        """
        imname = image.get('name', None)
        if imname is None:
            raise ValueError("image has no name")

        self.logger.debug("Adding image '%s' in channel %s" % (
            imname, self.name))

        self.datasrc[imname] = image

        # Has this image been loaded into a channel before?
        info = image.get('image_info', None)
        if info is None:
            # No
            idx = image.get('idx', None)
            path = image.get('path', None)
            image_loader = image.get('image_loader', None)
            image_future = image.get('image_future', None)
            info = self.add_history(imname, path,
                                    image_loader=image_loader,
                                    image_future=image_future,
                                    idx=idx)
            image.set(image_info=info)

        # add an image profile if one is missing
        profile = self.get_image_profile(image)
        info.profile = profile

        if not silent:
            self.add_image_update(image, info,
                                  update_viewer=not bulk_add)
def add_image_info(self, info):
image_loader = info.get('image_loader', self.fv.load_image)
# create an image_future if one does not exist
image_future = info.get('image_future', None)
if (image_future is None) and (info.path is not None):
image_future = Future.Future()
image_future.freeze(image_loader, info.path)
info = self.add_history(info.name, info.path,
image_loader=image_loader,
image_future=image_future)
    def get_image_info(self, imname):
        """Return the bookkeeping record for *imname*; KeyError if absent."""
        return self.image_index[imname]
    def update_image_info(self, image, info):
        """Merge *info* into the stored record for *image*.

        Only succeeds (returns True) when *image* is the exact object
        currently loaded in this channel under that name; otherwise
        nothing changes and False is returned.
        """
        imname = image.get('name', None)
        if (imname is None) or (imname not in self.image_index):
            return False

        # don't update based on image name alone--actual image must match
        try:
            my_img = self.get_loaded_image(imname)
            if my_img is not image:
                return False
        except KeyError:
            return False

        # update the info record
        iminfo = self.get_image_info(imname)
        iminfo.update(info)

        self.fv.make_async_gui_callback('add-image-info', self, iminfo)
        return True
    def add_image_update(self, image, info, update_viewer=False):
        """Fire the 'add-image' callback; optionally update the viewer.

        The viewer is only switched when *image* is still the newest
        entry in the data cache, and only if the 'switchnew' setting is
        on; 'raisenew' additionally raises this channel in the GUI.
        """
        self.fv.make_async_gui_callback('add-image', self.name, image, info)

        if not update_viewer:
            return

        current = self.datasrc.youngest()
        curname = current.get('name')
        self.logger.debug("image=%s youngest=%s" % (image.get('name'), curname))
        if current != image:
            return

        # switch to current image?
        if self.settings['switchnew']:
            self.logger.debug("switching to new image '%s'" % (curname))
            self.switch_image(image)

        if self.settings['raisenew']:
            channel = self.fv.get_current_channel()
            if channel != self:
                self.fv.change_channel(self.name)
    def refresh_cursor_image(self):
        """Redisplay the history entry under the cursor.

        Clears the viewer for an invalid cursor (< 0); otherwise reloads
        from the cache when possible, else switches by name (which may
        trigger a reload via the image future).
        """
        if self.cursor < 0:
            self.viewer.clear()
            self.fv.channel_image_updated(self, None)
            return

        info = self.history[self.cursor]
        if info.name in self.datasrc:
            # object still in memory
            data_obj = self.datasrc[info.name]
            self.switch_image(data_obj)
        else:
            self.switch_name(info.name)
def prev_image(self, loop=True):
self.logger.debug("Previous image")
if self.cursor <= 0:
n = len(self.history) - 1
if (not loop) or (n < 0):
self.logger.error("No previous image!")
return True
self.cursor = n
else:
self.cursor -= 1
self.refresh_cursor_image()
return True
def next_image(self, loop=True):
self.logger.debug("Next image")
n = len(self.history) - 1
if self.cursor >= n:
if (not loop) or (n < 0):
self.logger.error("No next image!")
return True
self.cursor = 0
else:
self.cursor += 1
self.refresh_cursor_image()
return True
    def _add_info(self, info):
        """Insert *info* into history and the name index if it is new.

        Keeps the history sorted when a sort key is configured, fires the
        'add-image-info' callback for new entries, and returns True when
        the entry was newly added (False when already present).
        """
        if info.name in self.image_index:
            # image info is already present
            return False

        self.history.append(info)
        self.image_index[info.name] = info

        if self.hist_sort is not None:
            self.history.sort(key=self.hist_sort)

        self.fv.make_async_gui_callback('add-image-info', self, info)

        # image was newly added
        return True
    def add_history(self, imname, path, idx=None,
                    image_loader=None, image_future=None):
        """Create (or fetch) the bookkeeping record for *imname*.

        A new Bunch record is built and registered only when the name is
        unknown; an existing record is returned unchanged.  A deferred
        reload future is created when a path is given without one.
        """
        if not (imname in self.image_index):
            if image_loader is None:
                image_loader = self.fv.load_image
            # create an image_future if one does not exist
            if (image_future is None) and (path is not None):
                image_future = Future.Future()
                image_future.freeze(image_loader, path)

            info = Bunch.Bunch(name=imname, path=path,
                               idx=idx,
                               image_loader=image_loader,
                               image_future=image_future,
                               time_added=time.time(),
                               time_modified=None,
                               last_viewer_info=None,
                               profile=None)
            self._add_info(info)
        else:
            # already in history
            info = self.image_index[imname]
        return info
    def remove_history(self, imname):
        """Remove the entry named *imname* from the channel history.

        Adjusts the history cursor so it keeps pointing at a valid entry
        (or becomes -1 when the history ends up empty), then notifies
        observers via the 'remove-image-info' callback.  Unknown names
        are ignored.
        """
        if imname in self.image_index:
            info = self.image_index[imname]
            del self.image_index[imname]
            i = self.history.index(info)
            self.history.remove(info)
            # adjust cursor as necessary
            if i < self.cursor:
                # an earlier entry was removed; shift the cursor left so it
                # still points at the same entry
                self.cursor -= 1
            if self.cursor >= len(self.history):
                # cursor fell off the end: wrap to the first entry, or to
                # -1 (no image) when the history is now empty
                self.cursor = min(0, len(self.history) - 1)
            self.fv.make_async_gui_callback('remove-image-info', self, info)
    def get_current_image(self):
        """Return the data object currently shown by the active viewer."""
        return self.viewer.get_dataobj()
    def view_object(self, dataobj):
        """Choose a viewer for *dataobj* and display it.

        Prefers the viewer previously used on this object; otherwise the
        compatible viewers are determined by priority.  With exactly one
        candidate it is used directly, otherwise the user is asked.

        Raises ValueError when no viewer can display the object.
        """
        # see if a viewer has been used on this object before
        vinfo = None
        obj_name = dataobj.get('name')
        if obj_name in self.image_index:
            info = self.image_index[obj_name]
            vinfo = info.last_viewer_info
        if vinfo is not None:
            # use the viewer we used before
            viewers = [vinfo]
        else:
            # find available viewers that can view this kind of object
            viewers = gviewer.get_priority_viewers(dataobj)
        if len(viewers) == 0:
            raise ValueError("No viewers for this data object!")
        self.logger.debug("{} available viewers for this model".format(len(viewers)))
        # if there is only one viewer available, use it otherwise
        # pop-up a dialog and ask the user
        if len(viewers) == 1:
            self._open_with_viewer(viewers[0], dataobj)
            return
        msg = ("Multiple viewers are available for this data object. "
               "Please select one.")
        self.fv.gui_choose_viewer(msg, viewers, self._open_with_viewer,
                                  dataobj)
    def _open_with_viewer(self, vinfo, dataobj):
        """Display *dataobj* with the viewer described by *vinfo*.

        Instantiates the viewer in this channel if necessary, raises its
        widget, loads the data, updates history/cursor bookkeeping and
        (when the 'preload_images' setting is on) queues the neighboring
        history entries for preloading.
        """
        # if we don't have this viewer type then install one in the channel
        if vinfo.name not in self.viewer_dict:
            self.fv.make_viewer(vinfo, self)
        self.viewer = self.viewer_dict[vinfo.name]
        # find this viewer and raise it
        idx = self.viewers.index(self.viewer)
        self.widget.set_index(idx)
        # and load the data
        self.viewer.set_dataobj(dataobj)
        obj_name = dataobj.get('name')
        if obj_name in self.image_index:
            info = self.image_index[obj_name]
            # record viewer last used to view this object
            info.last_viewer_info = vinfo
            if info in self.history:
                # update cursor to match dataobj
                self.cursor = self.history.index(info)
        self.fv.channel_image_updated(self, dataobj)
        # Check for preloading any dataobjs into memory
        preload = self.settings.get('preload_images', False)
        if not preload:
            return
        # queue next and previous files for preloading
        index = self.cursor
        if index < len(self.history) - 1:
            info = self.history[index + 1]
            if info.path is not None:
                self.fv.add_preload(self.name, info)
        if index > 0:
            info = self.history[index - 1]
            if info.path is not None:
                self.fv.add_preload(self.name, info)
def switch_image(self, image):
curimage = self.get_current_image()
if curimage == image:
self.logger.debug("Apparently no need to set channel viewer.")
return
self.logger.debug("updating viewer...")
self.view_object(image)
    def switch_name(self, imname):
        """Make the image named *imname* current in this channel.

        Order of preference: the in-memory data source, then a frozen
        image future (reconstituted off the GUI thread), then a plain
        reload from the recorded path.

        Raises ChannelError when the image is unknown or cannot be
        recreated.
        """
        if imname in self.datasrc:
            # Image is still in the heap
            image = self.datasrc[imname]
            self.switch_image(image)
            return
        if not (imname in self.image_index):
            errmsg = "No image by the name '%s' found" % (imname)
            self.logger.error("Can't switch to image '%s': %s" % (
                imname, errmsg))
            raise ChannelError(errmsg)
        # Do we have a way to reconstruct this image from a future?
        info = self.image_index[imname]
        if info.image_future is not None:
            self.logger.info("Image '%s' is no longer in memory; attempting "
                             "image future" % (imname))
            # TODO: recode this--it's a bit messy
            def _switch(image):
                # this will be executed in the gui thread
                self.add_image(image, silent=True)
                self.switch_image(image)
                # reset modified timestamp
                info.time_modified = None
                self.fv.make_async_gui_callback('add-image-info', self, info)
            def _load_n_switch(imname, path, image_future):
                # this will be executed in a non-gui thread
                # reconstitute the image
                image = self.fv.error_wrap(image_future.thaw)
                if isinstance(image, Exception):
                    errmsg = "Error reconstituting image: %s" % (str(image))
                    self.logger.error(errmsg)
                    raise image
                profile = info.get('profile', None)
                if profile is None:
                    profile = self.get_image_profile(image)
                info.profile = profile
                # perpetuate some of the image metadata
                image.set(image_future=image_future, name=imname, path=path,
                          image_info=info, profile=profile)
                # hand the reconstituted image back to the GUI thread
                self.fv.gui_do(_switch, image)
            self.fv.nongui_do(_load_n_switch, imname, info.path,
                              info.image_future)
        elif info.path is not None:
            # Do we have a path? We can try to reload it
            self.logger.debug("Image '%s' is no longer in memory; attempting "
                              "to load from %s" % (imname, info.path))
            #self.fv.load_file(path, chname=chname)
            self.fv.nongui_do(self.load_file, info.path, chname=self.name)
        else:
            raise ChannelError("No way to recreate image '%s'" % (imname))
def _configure_sort(self):
self.hist_sort = lambda info: info.time_added
# set sorting function
sort_order = self.settings.get('sort_order', 'loadtime')
if sort_order == 'alpha':
# sort history alphabetically
self.hist_sort = lambda info: info.name
    def _sort_changed_ext_cb(self, setting, value):
        # settings callback: 'sort_order' changed externally--re-resolve
        # the sort key and resort the existing history in place
        self._configure_sort()
        self.history.sort(key=self.hist_sort)
def get_image_profile(self, image):
profile = image.get('profile', None)
if profile is None:
profile = Settings.SettingGroup()
image.set(profile=profile)
return profile
    def __len__(self):
        # channel length == number of entries in the history
        return len(self.history)
    def __contains__(self, imname):
        # membership test by image name
        return imname in self.image_index
    def __getitem__(self, imname):
        # look up the history entry (image info) by image name
        return self.image_index[imname]
# END
| {
"content_hash": "c93a36c1d383e452def7d287cd9a45e9",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 89,
"avg_line_length": 32.17988394584139,
"alnum_prop": 0.5458315802127788,
"repo_name": "pllim/ginga",
"id": "f9ad85a1292bb08c5ee5d7ef38e5ca446b52657f",
"size": "16813",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ginga/rv/Channel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4359761"
}
],
"symlink_target": ""
} |
"""Read CPR files."""
import logging
import re
import sys
import numpy as np
import dwi.files
from dwi.types import Path
def read_cpr(path):
    """Read and parse a CPR file. Return masks, which are (id, RLE data).

    Note: Python 3.6.4 docs say its XML module is not secure, so we use
    a regular expression instead of a real XML parser.

    :param path: path-like object exposing ``read_text()``
    :returns: list of (mask id as int, run lengths as list of int)
    """
    # Example: <Mask id="123">[12 34 56]</Mask>
    mask_pattern = r'<Mask\s.*?id="(.*?)".*?>.*?\[(.*?)\].*?</Mask>'
    text = path.read_text()
    matches = re.finditer(mask_pattern, text, flags=re.DOTALL)

    def parse_match(m):
        # group 1: mask id, group 2: whitespace-separated run lengths
        number, mask = m.groups()
        number = int(number)
        mask = [int(x) for x in mask.split()]
        return number, mask

    masks = [parse_match(x) for x in matches]
    return masks
def parse_mask(mask):
    """Decode a run length encoded mask into a boolean array.

    The sign of each run length encodes the value: positive runs expand
    to True, non-positive runs to False; the magnitude is the length.

    :param mask: iterable of signed run lengths
    :returns: 1-D numpy bool array
    """
    lst = []
    for length in mask:
        n = int(length > 0)  # Run is 1 if positive, 0 if negative.
        lst.extend([n] * abs(length))
    # BUG FIX: use the builtin ``bool``; the ``np.bool`` alias was
    # deprecated in NumPy 1.20 and removed in 1.24.
    return np.array(lst, dtype=bool)
def main(path, shape, outdir, fmt='h5'):
    """Convert each mask in a CPR file into an image file in *outdir*.

    :param path: Path of the input CPR file
    :param shape: target 3-D shape for each mask, e.g. (20, 224, 224)
    :param outdir: output directory Path
    :param fmt: output file extension/format
    """
    logging.info(path)
    masks = read_cpr(path)
    logging.info(len(masks))
    for i, m in enumerate(masks, 1):
        number, mask = m
        # BUG FIX: '$i' was not a valid logging conversion; the mask
        # length was silently dropped from the message.
        logging.info('Mask: %i, %i', number, len(mask))
        mask = parse_mask(mask)
        try:
            # add a trailing channel axis
            mask.shape = shape + (1,)
        except ValueError as e:
            # decoded mask size does not match the requested shape
            logging.error('%s: %s', e, path)
            continue
        assert mask.ndim == 4, mask.shape
        outname = '{p}.{i:02d}-{n}.{f}'.format(p=path.name, i=i, n=number,
                                               f=fmt)
        outpath = outdir / outname
        attrs = {}
        print('Writing mask shape {}: {}'.format(mask.shape, outpath))
        dwi.files.ensure_dir(outpath)
        dwi.files.write_pmap(outpath, mask, attrs)
if __name__ == '__main__':
    # Arguments: input file; output directory; shape (eg 20,224,224).
    logging.basicConfig(level=logging.INFO)
    path, outdir, shape = sys.argv[1:]
    # BUG FIX: the first argument to logging.info() is a format string;
    # previously the raw path was used as the format and the remaining
    # arguments were dropped (or raised on '%' in the path).
    logging.info('%s %s %s %s', path, outdir, shape, sys.argv[1:])
    path = Path(path)
    shape = tuple(int(x) for x in shape.split(','))
    outdir = Path(outdir)
    main(path, shape, outdir)
| {
"content_hash": "8e8f675e30c2c44c4bc65e9a6e8dc8ad",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 30.092105263157894,
"alnum_prop": 0.570179274158286,
"repo_name": "jupito/dwilib",
"id": "4114935e48bf5efb2f1c9d1b546f0e1f75e95892",
"size": "2307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dwi/tools/cpr_tool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341365"
},
{
"name": "Shell",
"bytes": "4383"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import errno
import re
import socket
import ssl
try:
    from ssl import SSLContext
# BUG FIX: was a bare ``except:`` which also swallowed SystemExit and
# KeyboardInterrupt; only a missing name should trigger the fallback
# (SSLContext is absent on Python < 2.7.9).
except ImportError:
    SSLContext = None
# Jython does not have this attribute
try:
from socket import SOL_TCP
except ImportError: # pragma: no cover
from socket import IPPROTO_TCP as SOL_TCP # noqa
try:
from ssl import SSLError
except ImportError:
class SSLError(Exception): # noqa
pass
from struct import pack, unpack
from .exceptions import UnexpectedFrame
from .utils import get_errno, set_cloexec
_UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT
AMQP_PORT = 5672
EMPTY_BUFFER = bytes()
# Yes, Advanced Message Queuing Protocol Protocol is redundant
AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x00\x09'.encode('latin_1')
# Match things like: [fe80::1]:5432, from RFC 2732
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')
class _AbstractTransport(object):
    """Common superclass for TCP and SSL transports.

    Handles address resolution, connecting the socket, sending the AMQP
    protocol header, and frame-level I/O (read_frame / write_frame).
    Subclasses supply the raw byte transport by overriding _read(),
    _write(), _setup_transport() and _shutdown_transport().
    """
    # True while the underlying socket is believed usable
    connected = False
    def __init__(self, host, connect_timeout, operation_timeout):
        """Resolve *host*, connect, and send the AMQP protocol header.

        :param host: "host", "host:port" or RFC 2732 "[ipv6]:port" string
        :param connect_timeout: socket timeout used while connecting
        :param operation_timeout: socket timeout used after connecting
        :raises socket.error: if no resolved address could be connected
        """
        self.connected = True
        msg = None
        host, port = self._get_host_and_port_from(host)
        self.sock = None
        last_err = None
        # try each address that getaddrinfo() offers until one connects
        for res in socket.getaddrinfo(host, port, 0,
                                      socket.SOCK_STREAM, SOL_TCP):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                try:
                    # set close-on-exec so forked children don't inherit fd
                    set_cloexec(self.sock, True)
                except NotImplementedError:
                    pass
                self.sock.settimeout(connect_timeout)
                self.sock.connect(sa)
            except socket.error as exc:
                # remember the failure and try the next address
                msg = exc
                self.sock.close()
                self.sock = None
                last_err = msg
                continue
            break
        if not self.sock:
            # Didn't connect, return the most recent error message
            raise socket.error(last_err)
        try:
            self.sock.settimeout(operation_timeout)
            # disable Nagle and enable keepalives for low-latency framing
            self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1)
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self._setup_transport()
            self._write(AMQP_PROTOCOL_HEADER)
        except (OSError, IOError, socket.error) as exc:
            # only mark disconnected for hard errors, not EAGAIN/EINTR
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise
    def __del__(self):
        try:
            # socket module may have been collected by gc
            # if this is called by a thread at shutdown.
            if socket is not None:
                try:
                    self.close()
                except socket.error:
                    pass
        finally:
            self.sock = None
    @staticmethod
    def _get_host_and_port_from(host):
        """Split a host string into (host, port), defaulting to AMQP_PORT.

        Supports "host", "host:port" and RFC 2732 "[ipv6]"/"[ipv6]:port".
        """
        port = AMQP_PORT
        m = IPV6_LITERAL.match(host)
        if m:
            host = m.group(1)
            if m.group(2):
                port = int(m.group(2))
        else:
            if ':' in host:
                # rsplit: the last colon separates the port
                host, port = host.rsplit(':', 1)
                port = int(port)
        return host, port
    def _read(self, n, initial=False):
        """Read exactly n bytes from the peer"""
        raise NotImplementedError('Must be overriden in subclass')
    def _setup_transport(self):
        """Do any additional initialization of the class (used
        by the subclasses)."""
        pass
    def _shutdown_transport(self):
        """Do any preliminary work in shutting down the connection."""
        pass
    def _write(self, s):
        """Completely write a string to the peer."""
        raise NotImplementedError('Must be overriden in subclass')
    def close(self):
        """Shut down and close the socket; mark the transport dead."""
        if self.sock is not None:
            try:
                self._shutdown_transport()
                # Call shutdown first to make sure that pending messages
                # reach the AMQP broker if the program exits after
                # calling this method.
                self.sock.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
            finally:
                self.sock.close()
                self.sock = None
        self.connected = False
    def read_frame(self, unpack=unpack):
        """Read one AMQP frame; return (frame_type, channel, payload).

        :raises UnexpectedFrame: if the frame-end octet is not 0xCE
        :raises socket.timeout: partial data is pushed back into the
            read buffer so the read can be resumed later
        """
        read = self._read
        read_frame_buffer = EMPTY_BUFFER
        try:
            # 7-byte header: type (1), channel (2), payload size (4)
            frame_header = read(7, True)
            read_frame_buffer += frame_header
            frame_type, channel, size = unpack('>BHI', frame_header)
            payload = read(size)
            read_frame_buffer += payload
            # final octet must be the frame-end marker (0xCE)
            ch = ord(read(1))
        except socket.timeout:
            # preserve the partial frame so a retry sees a consistent
            # stream.  NOTE(review): _read_buffer is defined by the
            # subclasses, not by this base class -- confirm before reuse.
            self._read_buffer = read_frame_buffer + self._read_buffer
            raise
        except (OSError, IOError, socket.error) as exc:
            # Don't disconnect for ssl read time outs
            # http://bugs.python.org/issue10272
            if isinstance(exc, SSLError) and 'timed out' in str(exc):
                raise socket.timeout()
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise
        if ch == 206: # '\xce'
            return frame_type, channel, payload
        else:
            raise UnexpectedFrame(
                'Received 0x{0:02x} while expecting 0xce'.format(ch))
    def write_frame(self, frame_type, channel, payload):
        """Serialize and send one AMQP frame terminated by 0xCE."""
        size = len(payload)
        try:
            self._write(pack(
                '>BHI%dsB' % size,
                frame_type, channel, size, payload, 0xce,
            ))
        except socket.timeout:
            raise
        except (OSError, IOError, socket.error) as exc:
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise
class SSLTransport(_AbstractTransport):
    """Transport that works over SSL"""
    def __init__(self, host, connect_timeout, operation_timeout, ssl):
        """Prepare the SSL options, then connect via the base class.

        :param ssl: a dict of ssl.wrap_socket() keyword options, an
            ssl.SSLContext instance, or any other truthy value for
            default wrapping.
        """
        if isinstance(ssl, dict):
            self.sslopts = ssl
        # BUG FIX: test ``SSLContext is not None`` *before* isinstance();
        # on Python < 2.7.9 SSLContext is None and ``isinstance(x, None)``
        # raises TypeError instead of falling through.
        elif SSLContext is not None and isinstance(ssl, SSLContext):
            self.sslctx = ssl
            try:
                if ssl.check_hostname:
                    # hostname checking requested: remember the server
                    # name for the verification handshake
                    self.hostname, _ = self._get_host_and_port_from(host)
                else:
                    self.hostname = None
            except AttributeError:
                # Python versions < 3.4 do not support check_hostname
                self.hostname = None
        self._read_buffer = EMPTY_BUFFER
        super(SSLTransport, self).__init__(
            host, connect_timeout, operation_timeout
        )
    def _setup_transport(self):
        """Wrap the socket in an SSL object."""
        if hasattr(self, 'sslopts'):
            self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
        elif hasattr(self, 'sslctx'):
            self.sock = self.sslctx.wrap_socket(self.sock,
                                                server_hostname=self.hostname)
        else:
            self.sock = ssl.wrap_socket(self.sock)
        self.sock.do_handshake()
        # SSL sockets expose read(); use it for buffered reads
        self._quick_recv = self.sock.read
    def _shutdown_transport(self):
        """Unwrap a Python 2.6 SSL socket, so we can call shutdown()"""
        if self.sock is not None:
            try:
                unwrap = self.sock.unwrap
            except AttributeError:
                return
            try:
                self.sock = unwrap()
            except ValueError:
                # Failure within SSL might mean unwrap exists but socket is not
                # deemed wrapped
                pass
    def _read(self, n, initial=False,
              _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR)):
        """Read exactly *n* bytes from the SSL socket.

        According to SSL_read(3), it can at most return 16kb of data;
        an internal read buffer (like TCPTransport._read) accumulates
        the exact number of bytes wanted.
        """
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))  # see 16kb note above
                except socket.error as exc:
                    # ssl.sock.read may cause ENOENT if the
                    # operation couldn't be performed (Issue celery#1414).
                    if not initial and exc.errno in _errnos:
                        continue
                    raise
                if not s:
                    raise IOError('Socket closed')
                rbuf += s
        except:
            # bare except is deliberate: preserve partial data for *any*
            # exception (including KeyboardInterrupt) before re-raising
            self._read_buffer = rbuf
            raise
        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result
    def _write(self, s):
        """Write a string out to the SSL socket fully."""
        try:
            write = self.sock.write
        except AttributeError:
            # Works around a bug in python socket library
            raise IOError('Socket closed')
        else:
            while s:
                n = write(s)
                if not n:
                    raise IOError('Socket closed')
                s = s[n:]
class TCPTransport(_AbstractTransport):
    """Transport that deals directly with TCP socket."""
    def _setup_transport(self):
        """Setup to _write() directly to the socket, and
        do our own buffered reads."""
        self._write = self.sock.sendall
        self._read_buffer = EMPTY_BUFFER
        self._quick_recv = self.sock.recv
    def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)):
        """Read exactly n bytes from the socket"""
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))
                except socket.error as exc:
                    # retry on EAGAIN/EINTR except for the very first read
                    if not initial and exc.errno in _errnos:
                        continue
                    raise
                if not s:
                    # peer closed the connection
                    raise IOError('Socket closed')
                rbuf += s
        except:
            # bare except is deliberate: preserve partial data for *any*
            # exception (including KeyboardInterrupt) before re-raising
            self._read_buffer = rbuf
            raise
        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result
def create_transport(host, connect_timeout, operation_timeout, ssl=False):
    """Factory used by the Connection constructor: build an SSLTransport
    when *ssl* is truthy, otherwise a plain TCPTransport."""
    if not ssl:
        return TCPTransport(host, connect_timeout, operation_timeout)
    return SSLTransport(host, connect_timeout, operation_timeout, ssl)
| {
"content_hash": "904184413a45a36850d8b5b0e1c80b41",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 33.31230283911672,
"alnum_prop": 0.5383522727272727,
"repo_name": "Iotic-Labs/py-IoticAgent",
"id": "03c66a3846d40fe97eae0125a323e880f35aee3e",
"size": "11337",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/IoticAgent/third/amqp/transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "415"
},
{
"name": "Makefile",
"bytes": "608"
},
{
"name": "Python",
"bytes": "562219"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
from datetime import datetime, timezone
def utc_now() -> datetime:
    """Return the current moment as a timezone-aware datetime in UTC."""
    current = datetime.now(tz=timezone.utc)
    return current
| {
"content_hash": "4cfb5c40226164b2c717fbfd82c45dcd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 43,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.6948051948051948,
"repo_name": "SUNET/eduid-common",
"id": "2bd902f8d11af375d9862d48bc8f0b2ddaff6736",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eduid_common/misc/timeutil.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "186"
},
{
"name": "Python",
"bytes": "415412"
}
],
"symlink_target": ""
} |
import time
import multiprocessing
from multiprocessing.dummy import Pool
def simple_parallel():
    """Sum several lists in parallel using a 4-worker dummy (thread) pool."""
    values = [[2, 3, 5], [5, 5, 5], [2], [3, 3]]
    pool = Pool(4)
    results = pool.map(sum, values)
    pool.close()  # closing the pool
    pool.join()  # waiting for the work to finish
    print results
def worker1(i):
    # print the worker process name together with its argument
    print multiprocessing.current_process().name, i
def worker2(i):
    # simulate i seconds of work, logging start and end
    print multiprocessing.current_process().name, "Start"
    time.sleep(i)
    print multiprocessing.current_process().name, " End "
def multiprocess_learn():
    """Demonstrate spawning, naming, sleeping and terminating processes."""
    # five anonymous worker processes
    for i in xrange(5):
        p = multiprocessing.Process(target=worker1, args=(i,))
        p.start()
    # a process with an explicit name
    p = multiprocessing.Process(name="user-defined", target=worker1, args=(5,))
    p.start()
    # finishes within the 1s grace period, so terminate() is a no-op
    p = multiprocessing.Process(name="sleep0.5", target=worker2, args=(0.5,))
    p.start()
    time.sleep(1)
    if p.is_alive():
        p.terminate()
    # still sleeping after 2s, so it gets killed
    p = multiprocessing.Process(name="sleep3", target=worker2, args=(3,))
    p.start()
    time.sleep(2)
    if p.is_alive():
        p.terminate()  # forcibly terminate the process
class Consumer(multiprocessing.Process):
    """Worker process: consumes Task objects from a JoinableQueue until
    it receives a ``None`` poison pill; answers go to the result queue."""
    def __init__(self, task_queue, result_queue):
        multiprocessing.Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
    def run(self):
        while True:
            next_task = self.task_queue.get()
            if next_task is None:
                # poison pill: acknowledge it and stop this worker
                print "Exiting {}\n".format(self.name)
                self.task_queue.task_done()
                break
            print self.name, next_task
            # Task objects are callable; run the work item
            answer = next_task()
            self.task_queue.task_done()
            self.result_queue.put(answer)
        return
class Task(object):
    """A tiny callable work item: multiplies two numbers after a short
    sleep that simulates work."""
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __call__(self):
        time.sleep(0.1)
        return "{} * {} = {}".format(self.a, self.b, self.a * self.b)
    def __str__(self):
        return "{} * {} ...".format(self.a, self.b)
def multiprocessing_queue():
    # use a JoinableQueue to pass messages between processes
    n = multiprocessing.cpu_count() * 2
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    # start one consumer per worker slot
    for i in xrange(n):
        consumer = Consumer(tasks, results)
        consumer.start()
    print "-------------"
    m = 10
    for i in xrange(m):
        tasks.put(Task(i, i))
    # one poison pill per consumer so every worker exits
    for i in xrange(n):
        tasks.put(None)
    tasks.join()
    for i in xrange(m):
        print results.get()
def stage1(cond):
    # first stage: announce itself, then wake everyone waiting on cond
    name = multiprocessing.current_process().name
    print "starting", name
    with cond:
        print name
        cond.notify_all()  # Wake up all threads waiting on this condition.
def stage2(cond):
    # second stage: block until stage1 signals the condition
    name = multiprocessing.current_process().name
    print "starting", name
    with cond:
        cond.wait()  # Wait until notified or until a timeout occurs.
        print name
def multiprocessing_condition():
    # Synchronize a workflow with a Condition so that part of it runs
    # in parallel and part of it runs serially.
    # https://docs.python.org/2/library/threading.html#threading.Condition
    condition = multiprocessing.Condition()
    s0 = multiprocessing.Process(name="s0", target=stage1, args=(condition,))
    s_clients = [multiprocessing.Process(target=stage2, args=(condition,), name=str(i)) for i in xrange(3)]
    # start the waiters first, then the notifier after a short delay
    for c in s_clients:
        c.start()
    time.sleep(1)
    s0.start()
    s0.join()
    for c in s_clients:
        c.join()
if __name__ == "__main__":
    # uncomment to run the other demos
    # multiprocess_learn()
    # multiprocessing_queue()
    multiprocessing_condition()
| {
"content_hash": "c7bcbbe4b599cbcf530c476d44f78ade",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 107,
"avg_line_length": 26.709923664122137,
"alnum_prop": 0.5984567019148328,
"repo_name": "Officium/iLearn",
"id": "6056c7f976389a27e7b495126c3097c743806625",
"size": "3603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pystdlib/processes and threads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "201"
},
{
"name": "Python",
"bytes": "24149"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
import socket
import ssl
import sys
import eventlet
import eventlet.wsgi
import greenlet
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
class Server(object):
    """Server class to manage multiple WSGI sockets and applications."""
    def __init__(self, application, host=None, port=None, threads=1000):
        """Store the WSGI app and prepare the green-thread pool.

        :param application: WSGI application to serve
        :param host: bind address (defaults to all IPv4 interfaces)
        :param port: bind port (0 lets the OS choose)
        :param threads: size of the eventlet GreenPool
        """
        self.application = application
        self.host = host or '0.0.0.0'
        self.port = port or 0
        self.pool = eventlet.GreenPool(threads)
        self.socket_info = {}
        self.greenthread = None
        self.do_ssl = False
        self.cert_required = False
    def start(self, key=None, backlog=128):
        """Run a WSGI server with the given application."""
        # NOTE(review): _() appears to be an injected gettext builtin --
        # confirm it is installed before this module is imported.
        LOG.info(_('Starting %(arg0)s on %(host)s:%(port)s'),
                 {'arg0': sys.argv[0],
                  'host': self.host,
                  'port': self.port})
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        info = socket.getaddrinfo(self.host,
                                  self.port,
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        _socket = eventlet.listen(info[-1],
                                  family=info[0],
                                  backlog=backlog)
        if key:
            # expose the bound address under *key* (port may have been 0)
            self.socket_info[key] = _socket.getsockname()
        # SSL is enabled
        if self.do_ssl:
            if self.cert_required:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            sslsocket = eventlet.wrap_ssl(_socket, certfile=self.certfile,
                                          keyfile=self.keyfile,
                                          server_side=True,
                                          cert_reqs=cert_reqs,
                                          ca_certs=self.ca_certs)
            _socket = sslsocket
        # serve in a green thread so start() returns immediately
        self.greenthread = self.pool.spawn(self._run,
                                           self.application,
                                           _socket)
    def set_ssl(self, certfile, keyfile=None, ca_certs=None,
                cert_required=True):
        """Enable TLS for listeners started after this call."""
        self.certfile = certfile
        self.keyfile = keyfile
        self.ca_certs = ca_certs
        self.cert_required = cert_required
        self.do_ssl = True
    def kill(self):
        """Kill the listener green thread, if one was started."""
        if self.greenthread is not None:
            self.greenthread.kill()
    def wait(self):
        """Wait until all servers have completed running."""
        try:
            self.pool.waitall()
        except KeyboardInterrupt:
            # allow Ctrl-C to stop waiting without a traceback
            pass
        except greenlet.GreenletExit:
            # the pool was killed; treat as a normal shutdown
            pass
    def _run(self, application, socket):
        """Start a WSGI server in a new green thread."""
        logger = log.getLogger('eventlet.wsgi.server')
        try:
            eventlet.wsgi.server(socket, application, custom_pool=self.pool,
                                 log=log.WritableLogger(logger))
        except Exception:
            LOG.exception(_('Server error'))
            raise
| {
"content_hash": "227c9a59fd208e3bc2ed1159a6ea10d4",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 34.68817204301075,
"alnum_prop": 0.5158090514569126,
"repo_name": "dsiddharth/access-keys",
"id": "661c0521ad7d81eb4d98fb10dbba7764ee2d6f76",
"size": "4079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/common/environment/eventlet_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2619408"
},
{
"name": "Shell",
"bytes": "11206"
}
],
"symlink_target": ""
} |
"""Displays localized dialogs for Kodi"""
import xbmcgui
class Dialogs(object):
    """Displays localized dialogs for Kodi"""
    def __init__(self, utils):
        """Injects the utils instance

        :param utils: Plugin utils
        :type utils: resources.lib.Utils
        """
        self.utils = utils
    def show_password_dialog(self):
        """Prompt for a password with hidden input.

        :returns: string - Password characters
        """
        prompt = self.utils.get_local_string(string_id=32004)
        return xbmcgui.Dialog().input(
            prompt,
            type=xbmcgui.INPUT_ALPHANUM,
            option=xbmcgui.ALPHANUM_HIDE_INPUT)
    def show_email_dialog(self):
        """Prompt for an e-mail address.

        :returns: string - Email characters
        """
        prompt = self.utils.get_local_string(string_id=32005)
        return xbmcgui.Dialog().input(
            prompt,
            type=xbmcgui.INPUT_ALPHANUM)
    def show_not_available_dialog(self):
        """Show the "video not playable/available" modal.

        :returns: bool - Dialog shown
        """
        plugin_name = self.utils.get_addon_data().get('plugin')
        message = self.utils.get_local_string(string_id=32009)
        return xbmcgui.Dialog().ok(plugin_name, message)
    def show_login_failed_notification(self):
        """Show a "login failed" error notification for 5 seconds.

        :returns: bool - Notification shown
        """
        heading = self.utils.get_local_string(string_id=32006)
        message = self.utils.get_local_string(string_id=32007)
        xbmcgui.Dialog().notification(
            heading, message, xbmcgui.NOTIFICATION_ERROR, 5000)
    def show_storing_credentials_failed(self):
        """Show the "storing credentials failed" modal.

        :returns: bool - Dialog shown
        """
        plugin_name = self.utils.get_addon_data().get('plugin')
        xbmcgui.Dialog().ok(
            plugin_name,
            self.utils.get_local_string(32008))
| {
"content_hash": "c84615ef8ec066673d008c8b82ef3e60",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 57,
"avg_line_length": 27.52054794520548,
"alnum_prop": 0.571926331508213,
"repo_name": "asciidisco/plugin.video.telekom-sport",
"id": "2a4d52602ac5914ef77b540f9729bbd953c17060",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/lib/Dialogs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3527"
},
{
"name": "Python",
"bytes": "96598"
}
],
"symlink_target": ""
} |
from classytags.arguments import Argument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from cms.constants import PUBLISHER_STATE_PENDING
from cms.utils import get_cms_setting
from cms.utils.admin import get_admin_menu_item_context
from cms.utils.permissions import get_any_page_view_permissions
from django import template
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
# template tag/filter registry for this module
register = template.Library()
# base URL for the bundled admin icon images
CMS_ADMIN_ICON_BASE = "%sadmin/img/" % settings.STATIC_URL
class ShowAdminMenu(InclusionTag):
    """Renders the admin page-tree menu row for one page."""
    name = 'show_admin_menu'
    template = 'admin/cms/page/tree/menu.html'
    options = Options(
        Argument('page')
    )
    def get_context(self, context, page):
        request = context['request']
        # NOTE(review): if neither 'cl' nor 'filtered' is in the context,
        # ``filtered`` is never bound and the update below raises
        # NameError -- confirm callers always provide one of the two.
        if 'cl' in context:
            filtered = context['cl'].is_filtered()
        elif 'filtered' in context:
            filtered = context['filtered']
        language = context['preview_language']
        # following function is newly used for getting the context per item (line)
        # if something more will be required, then get_admin_menu_item_context
        # function have to be updated.
        # This is done because item can be reloaded after some action over ajax.
        context.update(get_admin_menu_item_context(request, page, filtered, language))
        return context
register.tag(ShowAdminMenu)
class TreePublishRow(Tag):
    """Renders a publish-state indicator <span> for one page/language
    cell in the admin page tree."""
    name = "tree_publish_row"
    options = Options(
        Argument('page'),
        Argument('language')
    )
    def render_tag(self, context, page, language):
        # live on the public side: distinguish in-sync from dirty
        if page.is_published(language) and page.publisher_public_id and page.publisher_public.is_published(language):
            if page.is_dirty(language):
                cls = "dirty"
                text = _("unpublished changes")
            else:
                cls = "published"
                text = _("published")
        else:
            if language in page.languages:
                # translation exists but is not live: distinguish "waiting
                # for an unpublished ancestor" from plain unpublished
                public_pending = page.publisher_public_id and page.publisher_public.get_publisher_state(
                    language) == PUBLISHER_STATE_PENDING
                if public_pending or page.get_publisher_state(
                        language) == PUBLISHER_STATE_PENDING:
                    cls = "unpublishedparent"
                    text = _("unpublished parent")
                else:
                    cls = "unpublished"
                    text = _("unpublished")
            else:
                # no translation at all in this language
                cls = "empty"
                text = _("no content")
        return mark_safe('<span class="%s" title="%s"></span>' % (cls, force_text(text)))
register.tag(TreePublishRow)
@register.filter
def is_published(page, language):
    """Template filter: True when *page* counts as published in
    *language* -- either live on the public side, or pending publication
    (waiting on an ancestor) for an existing translation."""
    has_public = bool(page.publisher_public_id)
    if page.is_published(language) and has_public and page.publisher_public.is_published(language):
        return True
    pending = (language in page.languages and has_public and
               page.publisher_public.get_publisher_state(language) == PUBLISHER_STATE_PENDING)
    return bool(pending)
class ShowLazyAdminMenu(InclusionTag):
    """Renders the lazily-loaded child menu for one page in the admin
    page tree (fetched over ajax when a node is expanded)."""
    name = 'show_lazy_admin_menu'
    template = 'admin/cms/page/tree/lazy_child_menu.html'
    options = Options(
        Argument('page')
    )
    def get_context(self, context, page):
        request = context['request']
        # NOTE(review): if neither 'cl' nor 'filtered' is in the context,
        # ``filtered`` is never bound and the update below raises
        # NameError -- confirm callers always provide one of the two.
        if 'cl' in context:
            filtered = context['cl'].is_filtered()
        elif 'filtered' in context:
            filtered = context['filtered']
        language = context['preview_language']
        # following function is newly used for getting the context per item (line)
        # if something more will be required, then get_admin_menu_item_context
        # function have to be updated.
        # This is done because item can be reloaded after some action over ajax.
        context.update(get_admin_menu_item_context(request, page, filtered, language))
        return context
register.tag(ShowLazyAdminMenu)
class CleanAdminListFilter(InclusionTag):
    """
    used in admin to display only these users that have actually edited a page
    and not everybody
    """
    name = 'clean_admin_list_filter'
    template = 'admin/filter.html'
    options = Options(
        Argument('cl'),
        Argument('spec'),
    )
    def get_context(self, context, cl, spec):
        # sort by query string so duplicates become adjacent, then keep
        # only the first choice of each run
        ordered = sorted(spec.choices(cl), key=lambda c: c['query_string'])
        deduped = []
        last_seen = None
        for candidate in ordered:
            qs = candidate['query_string']
            if qs != last_seen:
                deduped.append(candidate)
                last_seen = qs
        return {'title': spec.title, 'choices': deduped}
register.tag(CleanAdminListFilter)
@register.filter
def boolean_icon(value):
    """Return an <img> tag for the admin yes/no/unknown icon of *value*."""
    icon_name = {True: 'yes', False: 'no', None: 'unknown'}.get(value, 'unknown')
    html = u'<img src="%sicon-%s.gif" alt="%s" />' % (
        CMS_ADMIN_ICON_BASE, icon_name, value)
    return mark_safe(html)
@register.filter
def is_restricted(page, request):
    """Render an icon showing whether view permissions restrict *page*.

    With CMS_PERMISSION enabled the icon reflects whether any page-view
    restriction applies; otherwise the 'unknown' icon is shown.
    """
    if get_cms_setting('PERMISSION'):
        if hasattr(page, 'permission_restricted'):
            # a prefetched/annotated flag is available -- use it directly
            restricted = bool(page.permission_restricted)
        else:
            restricted = bool(list(get_any_page_view_permissions(request, page)))
        icon = boolean_icon(restricted)
    else:
        icon = boolean_icon(None)
    return mark_safe(
        ugettext('<span>%(icon)s</span>') % {
            'icon': icon,
        })
@register.filter
def preview_link(page, language):
    """Template filter: the preview URL of *page* in *language*, or ''
    when no localized path exists yet."""
    if settings.USE_I18N:
        # Which one of page.get_slug() and page.get_path() is the right
        # one to use in this block? They both seem to return the same thing.
        try:
            # attempt to retrieve the localized path/slug and return
            return page.get_absolute_url(language, fallback=False)
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt; catch Exception instead.
        except Exception:
            # no localized path/slug. therefore nothing to preview. stay on the same page.
            # perhaps the user should be somehow notified for this.
            return ''
    return page.get_absolute_url(language)
class RenderPlugin(InclusionTag):
    """Renders a CMS plugin instance (admin preview) via cms/content.html."""
    template = 'cms/content.html'
    options = Options(
        Argument('plugin')
    )
    def get_context(self, context, plugin):
        # render with admin=True so edit affordances are included
        return {'content': plugin.render_plugin(context, admin=True)}
register.tag(RenderPlugin)
class PageSubmitRow(InclusionTag):
    """Renders the submit-row buttons for the admin page change form."""
    name = 'page_submit_row'
    template = 'admin/cms/page/submit_row.html'
    def get_context(self, context):
        opts = context['opts']
        change = context['change']
        is_popup = context['is_popup']
        save_as = context['save_as']
        # BUG FIX: the two lookups were crossed ('advanced_settings' was
        # read into basic_info and vice versa), so the template always
        # received the two flags swapped.
        basic_info = context.get('basic_info', False)
        advanced_settings = context.get('advanced_settings', False)
        language = context.get('language', '')
        return {
            # TODO check this (old code: opts.get_ordered_objects() )
            'onclick_attrib': (opts and change
                               and 'onclick="submitOrderForm();"' or ''),
            'show_delete_link': False,
            'show_save_as_new': not is_popup and change and save_as,
            'show_save_and_add_another': False,
            'show_save_and_continue': not is_popup and context['has_change_permission'],
            'is_popup': is_popup,
            'basic_info': basic_info,
            'advanced_settings': advanced_settings,
            'show_save': True,
            'language': language,
            'object_id': context.get('object_id', None)
        }
register.tag(PageSubmitRow)
def in_filtered(seq1, seq2):
    """Return the items of ``seq1`` that also occur in ``seq2``, in order."""
    return list(filter(lambda item: item in seq2, seq1))
# Expose the helper as the `in_filtered` template filter.
in_filtered = register.filter('in_filtered', in_filtered)
@register.simple_tag
def admin_static_url():
    """
    If set, returns the string contained in the setting ADMIN_MEDIA_PREFIX, otherwise returns STATIC_URL + 'admin/'.
    """
    prefix = getattr(settings, 'ADMIN_MEDIA_PREFIX', None)
    if prefix:
        return prefix
    return settings.STATIC_URL + 'admin/'
class CMSAdminIconBase(Tag):
    """Template tag emitting the base URL of the CMS admin icon assets."""
    name = 'cms_admin_icon_base'

    def render_tag(self, context):
        # Plain constant lookup; the context is unused.
        return CMS_ADMIN_ICON_BASE
register.tag(CMSAdminIconBase)
@register.inclusion_tag('cms/toolbar/plugin.html', takes_context=True)
def render_plugin_toolbar_config(context, plugin, placeholder_slot=None):
    """Extend ``context`` with the allowed child/parent classes of ``plugin``."""
    current_page = context['request'].current_page
    plugin_class = plugin.get_plugin_class_instance()
    if placeholder_slot is None:
        # Default to the slot of the placeholder the plugin lives in.
        placeholder_slot = plugin.placeholder.slot
    context.update({
        'allowed_child_classes': plugin_class.get_child_classes(placeholder_slot, current_page),
        'allowed_parent_classes': plugin_class.get_parent_classes(placeholder_slot, current_page),
        'instance': plugin
    })
    return context
@register.inclusion_tag('admin/cms/page/plugin/submit_line.html', takes_context=True)
def submit_row_plugin(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    # Delete is offered only with permission, on an existing object, and when
    # not explicitly suppressed via 'show_delete'.
    can_delete = (context.get('has_delete_permission', False) and change
                  and context.get('show_delete', True))
    ctx = {
        'opts': opts,
        'show_delete_link': can_delete,
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': (context['has_add_permission']
                                      and not is_popup
                                      and (not save_as or context['add'])),
        'show_save_and_continue': not is_popup and context['has_change_permission'],
        'is_popup': is_popup,
        'show_save': True,
        'preserved_filters': context.get('preserved_filters'),
    }
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
| {
"content_hash": "3f68ee3e098f1ea39c2927007a0feefe",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 120,
"avg_line_length": 33.032786885245905,
"alnum_prop": 0.6253101736972705,
"repo_name": "keimlink/django-cms",
"id": "59ccd3e8d63a9ac8e45db1460a2b096df8bb5253",
"size": "10099",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/templatetags/cms_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128012"
},
{
"name": "HTML",
"bytes": "105180"
},
{
"name": "JavaScript",
"bytes": "667899"
},
{
"name": "Python",
"bytes": "1978594"
},
{
"name": "XSLT",
"bytes": "5917"
}
],
"symlink_target": ""
} |
import sys
from os.path import dirname
from base import unittest, TestCase
if __name__ == "__main__":
    # Running this file directly: put the repository root (parent of this
    # tests/ directory) on sys.path so the imports below resolve to the
    # local checkout rather than an installed copy.
    here = dirname(__file__)
    sys.path.insert(0, here+"/..")

from jinja2 import Environment

import jinja2_maps
class TestJinja2Maps(TestCase):
    """Smoke tests for the public surface of the jinja2_maps package."""

    def test_version(self):
        """The package version string follows an X.Y.Z scheme."""
        version = jinja2_maps.__version__
        self.assertRegexpMatches(version, r"^\d+\.\d+\.\d+")

    def test_activate_filters(self):
        """activate_filters registers the gmaps_url filter on an environment."""
        environment = Environment()
        jinja2_maps.activate_filters(environment)
        self.assertIn("gmaps_url", environment.filters)
if __name__ == "__main__":
    # Discover and run every test in this directory; propagate any failure
    # through the process exit status so CI can detect it.
    suite = unittest.defaultTestLoader.discover(here)
    t = unittest.TextTestRunner().run(suite)
    if not t.wasSuccessful():
        sys.exit(1)
| {
"content_hash": "bf163923b03bcbd35f6f00c848525d77",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 76,
"avg_line_length": 27.56,
"alnum_prop": 0.6458635703918723,
"repo_name": "bfontaine/jinja2_maps",
"id": "8761e7f8e45a938ddf7eb8b2a44bfa24ce2a33d2",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "13772"
}
],
"symlink_target": ""
} |
"""Module containing utility functions.
"""
# Standard libraries
import os
import errno
from math import log10, floor
from argparse import ArgumentParser
__all__ = ['line_start', 'comment', 'langs', 'file_ext', 'restrict',
           'header_ext', 'line_end', 'exp_10_fun', 'array_chars',
           'get_species_mappings', 'get_nu', 'read_str_num', 'split_str',
           'create_dir', 'get_array', 'get_index', 'reassign_species_lists',
           'is_integer', 'get_parser'
           ]

# Language-dependent code-generation tables; keys match entries of `langs`.
line_start = ' '
comment = dict(c='//', cuda='//',
               fortran='!', matlab='%'
               )
"""dict: comment characters for each language"""
langs = ['c', 'cuda', 'fortran', 'matlab']
"""list(`str`): list of supported languages"""
file_ext = dict(c='.c', cuda='.cu', fortran='.f90', matlab='.m')
"""dict: source code file extensions based on language"""
restrict = {'c' : '__restrict__',
            'cuda' : '__restrict__'}
"""dict: language-dependent keyword for restrict"""
header_ext = dict(c='.h', cuda='.cuh')
"""dict: header extensions based on language"""
line_end = dict(c=';\n', cuda=';\n',
                fortran='\n', matlab=';\n'
                )
"""dict: line endings dependent on language"""
exp_10_fun = dict(c="pow(10.0, ", cuda='exp10(',
                  fortran='exp(log(10) * ', matlab='exp(log(10.0) * '
                  )
"""dict: exp10 functions for various languages"""
array_chars = dict(c="[{}]", cuda="[INDEX({})]",
                   fortran="({})", matlab="({})"
                   )
"""dict: the characters to format an index into an array per language"""
# if false, zero values will be assumed to have been set previously (by memset etc.)
# and can be skipped, to increase efficiency
# NOTE(review): the two lines above describe a flag that is not defined in
# this module -- confirm whether the setting was removed.
def get_species_mappings(num_specs, last_species):
    """
    Maps species indices around species moved to last position.

    Parameters
    ----------
    num_specs : int
        Number of species.
    last_species : int
        Index of species being moved to end of system.

    Returns
    -------
    fwd_species_map : list of `int`
        List of original indices in new order
    back_species_map : list of `int`
        List of new indicies in original order
    """
    # New ordering: every species except ``last_species`` keeps its relative
    # order; ``last_species`` is appended at the end.
    fwd_species_map = [sp for sp in range(num_specs) if sp != last_species]
    fwd_species_map.append(last_species)

    # Inverse view: indices before ``last_species`` are unchanged,
    # ``last_species`` itself lands in the final slot, and everything after
    # it shifts down by one.
    back_species_map = []
    for sp in range(num_specs):
        if sp < last_species:
            back_species_map.append(sp)
        elif sp == last_species:
            back_species_map.append(num_specs - 1)
        else:
            back_species_map.append(sp - 1)
    return fwd_species_map, back_species_map
def get_nu(isp, rxn):
    """Returns the net nu of species isp for the reaction rxn

    Parameters
    ----------
    isp : int
        Species index
    rxn : `ReacInfo`
        Reaction

    Returns
    -------
    nu : int
        Overall stoichiometric coefficient of species ``isp`` in reaction ``rxn``
    """
    in_prod = isp in rxn.prod
    in_reac = isp in rxn.reac
    if not in_prod and not in_reac:
        # species does not participate in this reaction
        return 0
    # net coefficient = production - consumption
    nu = 0
    if in_prod:
        nu += rxn.prod_nu[rxn.prod.index(isp)]
    if in_reac:
        nu -= rxn.reac_nu[rxn.reac.index(isp)]
    return nu
def read_str_num(string, sep=None):
    """Returns a list of floats pulled from a string.

    Delimiter is optional; if not specified, uses whitespace.

    Parameters
    ----------
    string : str
        String to be parsed.
    sep : str, optional
        Delimiter (default is None, which means consecutive whitespace).

    Returns
    -------
    list of `float`
        Floats separated by ``sep`` in ``string``.
    """
    return [float(token) for token in string.split(sep)]
def split_str(seq, length):
    """Separate a string seq into length-sized pieces.

    Parameters
    ----------
    seq : str
        String containing sequence of smaller strings of constant length.
    length : int
        Length of individual sequences.

    Returns
    -------
    list of `str`
        List of strings of length ``length`` from ``seq``.
    """
    starts = range(0, len(seq), length)
    # the final piece may be shorter than ``length``
    return [seq[start:start + length] for start in starts]
def create_dir(path):
    """Creates a new directory based on input path.

    No error if path already exists, but other error is reported.

    Parameters
    ----------
    path : str
        Path of directory to be created

    Returns
    -------
    None
    """
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno == errno.EEXIST:
            # directory already present: not an error
            return
        raise
def get_array(lang, name, index, twod=None):
    """
    Given a language and an index, returns the proper string index formatted
    into the appropriate array characters (e.g., [] or ()).

    Parameters
    ----------
    lang : str
        One of the accepted languages
    name : str
        The name of the array
    index : int
        The index to format
    twod : int, optional
        If not ``None`` and the lang is 'fortan' or 'matlab' this will be formatted
        as a second index in the array.

    Returns
    -------
    name : str
        String with indexed array.
    """
    if index is None:
        # dummy call used to probe shared-memory names
        return name
    one_based = lang in ['fortran', 'matlab']
    if one_based and twod is not None:
        return name + '({}, {})'.format(index + 1, twod + 1)
    # Fortran/MATLAB arrays are 1-indexed; C/CUDA are 0-indexed.
    offset = index + 1 if one_based else index
    return name + array_chars[lang].format(offset)
def get_index(lang, index):
    """
    Given an integer index this function will return the proper string
    version of the index based on the language and other considerations

    Parameters
    ----------
    lang : str
        One of the supported languages, {'c', 'cuda', 'fortran', 'matlab'}
    index : int

    Returns
    -------
    str
        The string corresponding to the correct index to be formatted into the code,
        or ``None`` for an unrecognized language.
    """
    # FIX: removed the dead local ``retval`` that was assigned but never used.
    if lang in ['fortran', 'matlab']:
        # 1-based indexing
        return str(index + 1)
    if lang in ['c', 'cuda']:
        return str(index)
    # Unknown language: preserve the original implicit-None behavior.
    return None
def reassign_species_lists(reacs, specs):
    """
    Given a list of `ReacInfo`, and `SpecInfo`, this method will update the
    `ReacInfo` reactants / products / third body list to integers
    representing the species' index in the list.

    Parameters
    ----------
    reacs : list of `ReacInfo`
        List of reactions to be updated.
    specs : list of `SpecInfo`
        List of species

    Returns
    -------
    None
    """
    # name -> position in the species list
    species_map = dict((sp.name, i) for i, sp in enumerate(specs))
    for rxn in reacs:
        rxn.reac = [species_map[name] for name in rxn.reac]
        rxn.prod = [species_map[name] for name in rxn.prod]
        rxn.thd_body_eff = [(species_map[thd[0]], thd[1])
                            for thd in rxn.thd_body_eff]
        # empty string means "no pressure-dependent species"
        rxn.pdep_sp = species_map[rxn.pdep_sp] if rxn.pdep_sp != '' else None
def is_integer(val):
    """Returns `True` if argument is an integer or whole number.

    Parameters
    ----------
    val : int, float
        Value to be checked.

    Returns
    -------
    bool
        ``True`` if ``val`` is `int` or whole number (if `float`).
    """
    # FIX: the two bare ``except:`` clauses were narrowed so unexpected
    # exceptions (e.g. KeyboardInterrupt) are no longer swallowed.
    try:
        # floats (and ints on Python 3.12+) expose is_integer()
        return val.is_integer()
    except AttributeError:
        pass
    if isinstance(val, int):
        return True
    # last ditch effort: compare integer and float conversions
    try:
        return int(val) == float(val)
    except (TypeError, ValueError):
        return False
def get_parser():
    """Parse and return pyJac's command line arguments.

    Parameters
    ----------
    None

    Returns
    -------
    args : `argparse.Namespace`
        Command line arguments for running pyJac.
    """
    # imported lazily: only needed for the cpu_count() default below
    import multiprocessing

    # command line arguments
    parser = ArgumentParser(description='pyJac: Generates source code '
                                        'for analytical chemical '
                                        'Jacobians.'
                            )
    # --- input / output options ---
    parser.add_argument('-l', '--lang',
                        type=str,
                        choices=langs,
                        required=True,
                        help='Programming language for output source files.'
                        )
    parser.add_argument('-i', '--input',
                        type=str,
                        required=True,
                        help='Input mechanism filename (e.g., mech.dat).'
                        )
    parser.add_argument('-t', '--thermo',
                        type=str,
                        default=None,
                        help='Thermodynamic database filename (e.g., '
                             'therm.dat), or nothing if in mechanism.'
                        )
    parser.add_argument('-ic', '--initial-conditions',
                        type=str,
                        dest='initial_conditions',
                        default='',
                        required=False,
                        help='A comma separated list of initial initial '
                             'conditions to set in the '
                             'set_same_initial_conditions method.\n'
                             ' Expected Form: T,P,Species1=...,Species2=...,...\n'
                             ' Temperature in K\n'
                             ' Pressure in Atm\n'
                             ' Species in moles'
                        )
    # cuda specific
    parser.add_argument('-co', '--cache-optimizer',
                        dest='cache_optimizer',
                        action='store_true',
                        default=False,
                        help='Attempt to optimize cache store/loading '
                             'via use of a greedy selection algorithm. (Experimental)'
                        )
    parser.add_argument('-nosmem', '--no-shared-memory',
                        dest='no_shared',
                        action='store_true',
                        default=False,
                        help='Use this option to turn off attempted shared '
                             'memory acceleration for CUDA.'
                        )
    parser.add_argument('-pshare', '--prefer-shared',
                        dest='L1_preferred',
                        action='store_false',
                        default=True,
                        help='Use this option to allocate more space for '
                             'shared memory than the L1 cache for CUDA '
                             '(not recommended).'
                        )
    parser.add_argument('-nb', '--num-blocks',
                        type=int,
                        dest='num_blocks',
                        default=8,
                        required=False,
                        help='The target number of blocks / sm for CUDA.'
                        )
    parser.add_argument('-nt', '--num-threads',
                        type=int,
                        dest='num_threads',
                        default=64,
                        required=False,
                        help='The target number of threads / block for CUDA.'
                        )
    # --- optimization options ---
    parser.add_argument('-mt', '--multi-threaded',
                        type=int,
                        dest='multi_thread',
                        default=multiprocessing.cpu_count(),
                        required=False,
                        help='The number of threads to use during the '
                             'optimization process.'
                        )
    parser.add_argument('-fopt', '--force-optimize',
                        dest='force_optimize',
                        action='store_true',
                        default=False,
                        help='Use this option to force a reoptimization of '
                             'the mechanism (usually only happens when '
                             'generating for a different mechanism).'
                        )
    parser.add_argument('-b', '--build_path',
                        required=False,
                        default='./out/',
                        help='The folder to generate the Jacobian and rate subroutines in.'
                        )
    parser.add_argument('-ls', '--last_species',
                        required=False,
                        type=str,
                        default=None,
                        help='The name of the species to set as the last in '
                             'the mechanism. If not specifed, defaults to '
                             'the first of N2, AR, and HE in the mechanism.'
                        )
    parser.add_argument('-ad', '--auto_diff',
                        default=False,
                        action='store_true',
                        help='Use this option to generate file for use with the '
                             'Adept autodifferentiation library.')
    parser.add_argument('-sj', '--skip_jac',
                        required=False,
                        default=False,
                        action='store_true',
                        help='If specified, this option turns off Jacobian generation '
                             '(only rate subs are generated)')
    args = parser.parse_args()
    return args
| {
"content_hash": "1be80ff9bbf0c84d9e187c94c726c1ce",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 91,
"avg_line_length": 31.555299539170505,
"alnum_prop": 0.5071924059875867,
"repo_name": "SLACKHA/pyJac",
"id": "8cc5998fc8f7872283019577b6570dd463c75f22",
"size": "13719",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyjac/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6869"
},
{
"name": "Cuda",
"bytes": "10026"
},
{
"name": "Python",
"bytes": "495939"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Packaging metadata for geeseflypy: a pure-Python implementation of the
# Skein hash function and the Threefish block cipher.
setup(
    name='geeseflypy',
    version='0.4',
    author='Jonathan Bowman',
    author_email="bowmanjd@gmail.com",
    url='http://code.google.com/p/geeseflypy/',
    packages=['geesefly',],
    license='Apache License, Version 2.0',
    description='Pure Python implementation of Skein and Threefish',
    # README.txt doubles as the long description shown on the package index.
    long_description=open('README.txt').read(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.5",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.0",
        "Programming Language :: Python :: 3.1",
        "Topic :: Security :: Cryptography",
    ],
)
| {
"content_hash": "32e1f4694e7ae833297943583eb83b6c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 68,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.646,
"repo_name": "bowmanjd/geeseflypy",
"id": "16a79437102d0374b4e1dd7c94fb52f0eb3562bc",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30076"
}
],
"symlink_target": ""
} |
import time
import logging.config
import sys
from tashi.rpycservices.rpyctypes import Errors, HostState, InstanceState, TashiException
from tashi.util import createClient, instantiateImplementation, boolean
from tashi.utils.config import Config
import tashi
class Primitive(object):
    """Simple placement scheduler agent for Tashi (Python 2 codebase).

    Periodically polls the cluster manager (CM) for hosts and instances and
    places pending VMs onto hosts, honoring capacity, host reservations and
    per-instance hints ('targetHost', 'densePack', 'allowElsewhere').
    """

    def __init__(self, config):
        self.config = config
        self.cm = createClient(config)
        self.hooks = []
        self.log = logging.getLogger(__file__)
        self.scheduleDelay = float(self.config.get("Primitive", "scheduleDelay"))
        self.densePack = boolean(self.config.get("Primitive", "densePack"))
        # Instantiate every "hook*" entry from the [Primitive] config section;
        # sorted so hooks load in a deterministic order.
        items = self.config.items("Primitive")
        items.sort()
        for item in items:
            (name, value) = item
            name = name.lower()
            if (name.startswith("hook")):
                try:
                    self.hooks.append(instantiateImplementation(value, config, self.cm, False))
                except:
                    self.log.exception("Failed to load hook %s" % (value))
        # Cached cluster state, refreshed each iteration by __getState().
        self.hosts = {}
        self.load = {}
        self.instances = {}
        # muffle: instance names already logged as unplaceable (avoids log spam)
        self.muffle = {}
        self.lastScheduledHost = 0
        # clearHints: hints (e.g. unusable targetHost names) to be ignored
        # until the referenced host returns to normal
        self.clearHints = {}

    def __getState(self):
        """Refresh self.hosts / self.load / self.instances from the CM."""
        # Generate a list of hosts and
        # current loading of VMs per host
        hosts = {}
        # load's keys are the host id, or None if not on a host. values are instance ids
        load = {}
        ctr = 0
        for h in self.cm.getHosts():
            #XXXstroucki get all hosts here?
            #if (self.__isReady(h)):
            hosts[ctr] = h
            ctr = ctr + 1
            load[h.id] = []
        load[None] = []
        _instances = self.cm.getInstances()
        instances = {}
        for i in _instances:
            instances[i.id] = i
        # XXXstroucki put held machines behind pending ones
        heldInstances = []
        for i in instances.itervalues():
            # Nonrunning VMs will have hostId of None, but
            # so will Suspended VMs.
            if (i.hostId or i.state == InstanceState.Pending):
                load[i.hostId] = load[i.hostId] + [i.id]
            elif (i.hostId is None and i.state == InstanceState.Held):
                heldInstances = heldInstances + [i.id]
        load[None] = load[None] + heldInstances
        self.hosts = hosts
        self.load = load
        self.instances = instances

    def __checkCapacity(self, host, inst):
        """Return True if host has memory and cores free for inst."""
        # ensure host can carry new load
        memUsage = reduce(lambda x, y: x + self.instances[y].memory, self.load[host.id], inst.memory)
        coreUsage = reduce(lambda x, y: x + self.instances[y].cores, self.load[host.id], inst.cores)
        if (memUsage <= host.memory and coreUsage <= host.cores):
            return True
        return False

    def __clearHints(self, hint, name):
        """Drop `name` from the ignored-hint list `hint` (host is usable again)."""
        # remove the clearHint if the host comes back to normal mode
        if name in self.clearHints[hint]:
            popit = self.clearHints[hint].index(name)
            self.clearHints[hint].pop(popit)

    def __isReady(self, host):
        """A host is schedulable only when up and in the Normal state."""
        if host.up == False or host.state != HostState.Normal:
            return False
        return True

    def __scheduleInstance(self, inst):
        """Try to place and activate a single pending instance."""
        try:
            # Best candidate so far: its VM count, host object and host index.
            minMax = None
            minMaxHost = None
            minMaxCtr = None
            densePack = inst.hints.get("densePack", None)
            if (densePack is None):
                densePack = self.densePack
            else:
                densePack = boolean(densePack)
            # Grab the targetHost config options if passed
            targetHost = inst.hints.get("targetHost", None)
            # Check to see if we have already handled this hint
            clearHints = self.clearHints
            clearHints["targetHost"] = clearHints.get("targetHost", [])
            # If we handled the hint, don't look at it anymore
            if targetHost in clearHints["targetHost"]:
                targetHost = None
            try:
                allowElsewhere = boolean(inst.hints.get("allowElsewhere", "False"))
            except Exception, e:
                allowElsewhere = False
            # has a host preference been expressed?
            if (targetHost != None):
                for h in self.hosts.values():
                    if (self.__isReady(h)):
                        self.__clearHints("targetHost", h.name)
                        # if this is not the host we are looking for, continue
                        if ((str(h.id) != targetHost and h.name != targetHost)):
                            continue
                        # we found the targetHost
                        # If a host machine is reserved, only allow if userid is in reserved list
                        if ((len(h.reserved) > 0) and inst.userId not in h.reserved):
                            # Machine is reserved and not available for userId.
                            # XXXstroucki: Should we log something here for analysis?
                            break
                        if self.__checkCapacity(h, inst):
                            minMax = len(self.load[h.id])
                            minMaxHost = h
            # end targethost != none

            # If we don't have a host yet, find one here
            if ((targetHost == None or allowElsewhere) and minMaxHost == None):
                # cycle list
                # Adding this to catch if this gets set to None. Fix
                if self.lastScheduledHost == None:
                    self.lastScheduledHost = 0
                # Round-robin: start scanning just after the last host used.
                for ctr in range(self.lastScheduledHost, len(self.hosts)) + range(0, self.lastScheduledHost):
                    h = self.hosts[ctr]
                    # XXXstroucki if it's unavailable, find another machine
                    if (self.__isReady(h) == False):
                        continue
                    else:
                        # If the host is back to normal, get rid of the entry in clearHints
                        self.__clearHints("targetHost", h.name)
                    # if it's reserved, see if we can use it
                    if ((len(h.reserved) > 0) and inst.userId not in h.reserved):
                        # reserved for somebody else, so find another machine
                        continue
                    # implement dense packing policy:
                    # consider this host if
                    # minMax has not been modified or
                    # the number of vms here is greater than minmax if we're dense packing or
                    # the number of vms here is less than minmax if we're not dense packing
                    if (minMax is None or (densePack and len(self.load[h.id]) > minMax) or (not densePack and len(self.load[h.id]) < minMax)):
                        if self.__checkCapacity(h, inst):
                            minMax = len(self.load[h.id])
                            minMaxHost = h
                            minMaxCtr = ctr

            # check that VM image isn't mounted persistent already
            # Should set a status code to alert user
            # Tried to update the state of the instance and set persistent=False but
            # couldn't do it, should work until we find a better way to do this
            if inst.disks[0].persistent == True:
                count = 0
                myDisk = inst.disks[0].uri
                for i in self.cm.getInstances():
                    if myDisk == i.disks[0].uri and i.disks[0].persistent == True:
                        count += 1
                if count > 1:
                    minMaxHost = None

            if (minMaxHost):
                # found a host
                if (not inst.hints.get("__resume_source", None)):
                    # only run preCreate hooks if newly starting
                    for hook in self.hooks:
                        try:
                            hook.preCreate(inst)
                        except:
                            self.log.warning("Failed to run preCreate hook")
                self.log.info("Scheduling instance %s (%d mem, %d cores, %d uid) on host %s" % (inst.name, inst.memory, inst.cores, inst.userId, minMaxHost.name))
                rv = "fail"
                try:
                    rv = self.cm.activateVm(inst.id, minMaxHost)
                    if rv == "success":
                        self.lastScheduledHost = minMaxCtr
                        self.load[minMaxHost.id] = self.load[minMaxHost.id] + [inst.id]
                        # get rid of its possible entry in muffle if VM is scheduled to a host
                        if (inst.name in self.muffle):
                            self.muffle.pop(inst.name)
                    else:
                        self.log.warning("Instance %s failed to activate on host %s" % (inst.name, minMaxHost.name))
                except TashiException, e :
                    # If we try to activate the VM and get errno 10, host not in normal mode, add it to the list
                    # check for other errors later
                    if e.errno == Errors.HostStateError:
                        self.clearHints["targetHost"] = self.clearHints.get("targetHost", [])
                        self.clearHints["targetHost"].append(targetHost)
            else:
                # did not find a host
                if (inst.name not in self.muffle):
                    self.log.info("Failed to find a suitable place to schedule %s" % (inst.name))
                    self.muffle[inst.name] = True
        except Exception, e:
            # XXXstroucki: how can we get here?
            if (inst.name not in self.muffle):
                self.log.exception("Failed to schedule or activate %s" % (inst.name))
                self.muffle[inst.name] = True

    def start(self):
        """Main loop: poll the CM, run hooks for exited VMs, schedule waiters."""
        oldInstances = {}
        # XXXstroucki: scheduling races have been observed, where
        # a vm is scheduled on a host that had not updated its
        # capacity with the clustermanager, leading to overloaded
        # hosts. I think the place to insure against this happening
        # is in the nodemanager. This scheduler will keep an
        # internal state of cluster loading, but that is best
        # effort and will be refreshed from CM once the buffer
        # of vms to be scheduled is exhausted.
        while True:
            try:
                # XXXstroucki: to get a list of vms to be
                # scheduled, it asks the CM for a full
                # cluster state, and will look at those
                # without a host.
                self.__getState()

                # Check for VMs that have exited and call
                # postDestroy hook
                for i in oldInstances:
                    # XXXstroucki what about paused and saved VMs?
                    # XXXstroucki: do we need to look at Held VMs here?
                    if (i not in self.instances and (oldInstances[i].state == InstanceState.Running or oldInstances[i].state == InstanceState.Destroying or oldInstances[i].state == InstanceState.ShuttingDown)):
                        self.log.info("VM exited: %s" % (oldInstances[i].name))
                        for hook in self.hooks:
                            hook.postDestroy(oldInstances[i])
                oldInstances = self.instances

                if (len(self.load.get(None, [])) > 0):
                    # Schedule VMs if they are waiting
                    # sort by id number (FIFO?)
                    self.load[None].sort()
                    for i in self.load[None]:
                        inst = self.instances[i]
                        self.__scheduleInstance(inst)
                    # end for unassigned vms
            except TashiException:
                self.log.exception("Tashi exception")
            except Exception:
                self.log.warning("Scheduler iteration failed")
            # wait to do the next iteration
            time.sleep(self.scheduleDelay)
def main():
    """Entry point: load config, wire up logging/publisher, run the agent."""
    config = Config(["Agent"])
    configFiles = config.getFiles()
    # Publisher must be created before logging is configured so log events
    # can be published; the module-level `tashi.publisher` is the global hookup.
    publisher = instantiateImplementation(config.get("Agent", "publisher"), config)
    tashi.publisher = publisher
    logging.config.fileConfig(configFiles)
    agent = Primitive(config)
    try:
        agent.start()
    except KeyboardInterrupt:
        # Ctrl-C: fall through to a clean shutdown.
        pass
    log = logging.getLogger(__file__)
    log.info("Primitive exiting")
    sys.exit(0)

if __name__ == "__main__":
    main()
| {
"content_hash": "3ae99d831828f4defac0ae700bbd1578",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 195,
"avg_line_length": 33.096345514950166,
"alnum_prop": 0.664123669945794,
"repo_name": "stroucki/tashi",
"id": "a6a14e004197041863b94f0715ba752915957b83",
"size": "10773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tashi/agents/primitive.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "9326"
},
{
"name": "PHP",
"bytes": "28750"
},
{
"name": "Python",
"bytes": "606584"
},
{
"name": "Shell",
"bytes": "28185"
}
],
"symlink_target": ""
} |
"""A source for reading VCF files and extracting signals about input size."""
from functools import partial
from typing import Dict, Iterable # pylint: disable=unused-import
from apache_beam import transforms
from apache_beam.io import filebasedsource
from apache_beam.io import filesystem
from apache_beam.io import filesystems
from apache_beam.io import iobase
from apache_beam.io import range_trackers # pylint: disable=unused-import
class VcfEstimate():
    """Container for estimation data about the VCF file."""

    def __init__(self,
                 file_name,  # type: str
                 estimated_variant_count,  # type: float
                 samples,  # type: List[str]
                 size_in_bytes  # type: int
                ):
        # type: (...) -> None
        """Initializes a VcfEstimate object.

        Args:
          file_name: Name of file.
          estimated_variant_count: Estimated number of variants.
          samples: Sample names in the file.
          size_in_bytes: Size of the file.
        """
        self.file_name = file_name
        self.estimated_variant_count = estimated_variant_count
        self.samples = samples
        self.size_in_bytes = size_in_bytes

    def __eq__(self, other):
        # Two estimates are equal when all four fields match.
        own_fields = (self.file_name, self.estimated_variant_count,
                      self.samples, self.size_in_bytes)
        other_fields = (other.file_name, other.estimated_variant_count,
                        other.samples, other.size_in_bytes)
        return own_fields == other_fields

    def __repr__(self):
        return 'File Name: {}, Variant Count: {}, Samples: {}, Size: {}'.format(
            self.file_name, self.estimated_variant_count, self.samples,
            self.size_in_bytes)
class VcfEstimateSource(filebasedsource.FileBasedSource):
    """A source for inferring the estimate input sizes of VCF file."""

    def __init__(self,
                 file_pattern,
                 compression_type=filesystem.CompressionTypes.AUTO,
                 validate=True):
        # type: (str, str, bool) -> None
        # splittable=False: each file must be read from the top because the
        # header has to be parsed before any data line can be interpreted.
        super().__init__(file_pattern,
                         compression_type=compression_type,
                         validate=validate,
                         splittable=False)
        self._compression_type = compression_type

    def _get_header_info(self, file_to_read, file_name):
        # type: (str, str) -> (int, str)
        """Returns the header size and sample names."""
        header_size = 0
        header_line = file_to_read.readline().decode('utf-8')
        # Read and skip all header lines starting with ##. Make sure to calculate
        # their total size, to marginally better approximate the line count.
        while (header_line.startswith('##') or not header_line or
               not header_line.strip()):
            header_size += len(header_line)
            header_line = file_to_read.readline().decode('utf-8')
        if not header_line.startswith('#'):
            raise ValueError(('No column-defining header line was found in file {}.'
                              .format(file_name)))
        header_size += len(header_line)
        calls = header_line.split()[8:]  # Removes #CHROME..INFO mandatory fields.
        # Drop the optional FORMAT column so only sample names remain.
        return (header_size,
                calls if (not calls or calls[0] != 'FORMAT') else calls[1:])

    def _estimate_variant_count(self, file_to_read, file_name, header_size):
        """Calculates the approximate number of data lines in the file.

        Extracts the size of the first records data line, and gets the size of the
        total file size from filesystem. Generates the approximate data line count
        by subtracting header size from total size and diving it by the single line
        size.
        """
        size_in_bytes = filesystems.FileSystems.match(
            [file_name])[0].metadata_list[0].size_in_bytes
        all_lines_size = size_in_bytes
        if not isinstance(file_to_read, filesystem.CompressedFile):
            # TODO(#482): Find a better solution to handling compressed files.
            all_lines_size -= header_size
        else:
            # Compressed input: the on-disk size is doubled as a rough
            # approximation of the decompressed data size.
            all_lines_size *= 2
        first_record = file_to_read.readline()
        # Skip any blank lines between the header and the first record.
        while not first_record or not first_record.strip():
            first_record = file_to_read.readline()
        line_size = len(first_record)
        return float(all_lines_size) / line_size

    def read_records(
            self,
            file_name,  # type: str
            unused_range_tracker  # type: range_trackers.UnsplittableRangeTracker
            ):
        # type: (...) -> Iterable[VcfEstimate]
        """Yields one VcfEstimate describing ``file_name``."""
        with filesystems.FileSystems.open(
            file_name, compression_type=self._compression_type) as file_to_read:
            header_size, samples = self._get_header_info(file_to_read, file_name)
            estimated_variant_count = self._estimate_variant_count(
                file_to_read, file_name, header_size)
            size_in_bytes = filesystems.FileSystems.match(
                [file_name])[0].metadata_list[0].size_in_bytes
            if isinstance(file_to_read, filesystem.CompressedFile):
                # Report the same doubled size used by the estimation above.
                size_in_bytes *= 2
            yield VcfEstimate(file_name=file_name,
                              samples=samples,
                              estimated_variant_count=estimated_variant_count,
                              size_in_bytes=size_in_bytes)
class GetEstimates(transforms.PTransform):
    """Reads files until the first data line and extracts input sizes."""

    def __init__(
            self,
            file_pattern,  # type: str
            compression_type=filesystem.CompressionTypes.AUTO,  # type: str
            validate=True,  # type: bool
            **kwargs  # type: **str
            ):
        # type: (...) -> None
        """Initialize the :class:`GetEstimates` transform.

        Args:
          file_pattern: The file path to read from either as a single file or a glob
            pattern.
          compression_type: Used to handle compressed input files.
            Typical value is :attr:`CompressionTypes.AUTO
            <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
            underlying file_path's extension will be used to detect the compression.
          validate: Flag to verify that the files exist during the pipeline creation
            time.
        """
        super().__init__(**kwargs)
        self._source = VcfEstimateSource(file_pattern,
                                         compression_type=compression_type,
                                         validate=validate)

    def expand(self, pvalue):
        source_read = iobase.Read(self._source)
        return pvalue.pipeline | source_read
def _create_vcf_estimate_source(file_pattern,
                                compression_type=None):
    # Factory helper: builds a VcfEstimateSource for a single file pattern.
    return VcfEstimateSource(file_pattern,
                             compression_type=compression_type)
class GetAllEstimates(transforms.PTransform):
    """Reads files until the first data line and extracts input sizes.

    This transform is to be preferred over GetEstimates for large number of files.
    """
    DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024  # 64MB

    def __init__(
            self,
            desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
            compression_type=filesystem.CompressionTypes.AUTO,
            **kwargs):
        # type: (int, str, **str) -> None
        """Initialize the :class:`GetAllEstimates` transform.

        Args:
          desired_bundle_size: Desired size of bundles that should be generated when
            splitting this source into bundles. See
            :class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
            details.
          compression_type: Used to handle compressed input files.
            Typical value is :attr:`CompressionTypes.AUTO
            <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
            underlying file_path's extension will be used to detect the compression.
        """
        super().__init__(**kwargs)
        # Bind the compression type now; ReadAllFiles will call this factory
        # once per matched file.
        source_from_file = partial(
            _create_vcf_estimate_source,
            compression_type=compression_type)
        self._read_all_files = filebasedsource.ReadAllFiles(
            False,  # splittable (we are just reading the headers)
            filesystem.CompressionTypes.AUTO, desired_bundle_size,
            0,  # min_bundle_size
            source_from_file)

    def expand(self, pvalue):
        return pvalue | 'ReadAllFiles' >> self._read_all_files
| {
"content_hash": "2dd24d4c31999f12afd084205789623a",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 80,
"avg_line_length": 37.04265402843602,
"alnum_prop": 0.6500767656090072,
"repo_name": "googlegenomics/gcp-variant-transforms",
"id": "7c0a2ed7bbfa6c0e89c0c8cdbf76f0db4e26d9aa",
"size": "8394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcp_variant_transforms/beam_io/vcf_estimate_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3534"
},
{
"name": "Python",
"bytes": "1101324"
},
{
"name": "Shell",
"bytes": "17097"
}
],
"symlink_target": ""
} |
import os
import sys

# Resolve the package directories relative to this conf.py instead of a
# hard-coded developer home directory, so the docs build on any machine.
_DOCS_DIR = os.path.dirname(os.path.abspath(__file__))
_PACKAGE_DIR = os.path.abspath(os.path.join(_DOCS_DIR, os.pardir, 'eventify'))
_SUBPACKAGES = ('drivers', 'exceptions', 'event', 'tracking', 'persist')

sys.path.insert(0, _PACKAGE_DIR)
for _position, _subpackage in enumerate(_SUBPACKAGES, start=1):
    sys.path.insert(_position, os.path.join(_PACKAGE_DIR, _subpackage))
# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'sphinxcontrib.napoleon'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE: 'copyright' shadows the builtin, but Sphinx requires this exact name.
project = 'eventify'
copyright = '2017, Matthew Harris'
author = 'Matthew Harris'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.26'
# The full version, including alpha/beta/rc tags.
release = '0.5.26'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases expect a string here (e.g. 'en') —
# confirm before upgrading Sphinx.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**test**']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'eventifydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'eventify.tex', 'eventify Documentation',
     'Matthew Harris', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'eventify', 'eventify Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'eventify', 'eventify Documentation',
     author, 'eventify', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "568b81d8c76f37163c323b8c290e363c",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 99,
"avg_line_length": 32.723404255319146,
"alnum_prop": 0.6755526657997399,
"repo_name": "morissette/eventify",
"id": "c5f569d14026428aac37ee8947e59734cdd4c6b7",
"size": "4875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11221"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
import subprocess
from glob import glob
def clean_output(pattern='*.nc'):
    """Delete every file in the current directory matching *pattern*.

    A plain ``for`` loop is used instead of a list comprehension because
    the deletion is performed purely for its side effect.
    """
    for fname in glob(pattern):
        os.unlink(fname)
class RunNotebooks(unittest.TestCase):
    """Smoke-test every IPython notebook found under the parent directory."""

    def setUp(self):
        # Collect the absolute path of every *.ipynb below the parent of cwd.
        parent = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
        self.files = [
            os.path.join(root, name)
            for root, dirs, names in os.walk(parent)
            for name in names
            if name.endswith(".ipynb")
        ]

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_notebooks(self):
        """Test the notebook with runipy."""
        for notebook in self.files:
            folder, ipy_file = os.path.split(notebook)
            # If reading or saving in that directory.
            os.chdir(folder)
            sys.path.append(folder)
            print("Running {}".format(ipy_file))
            status = subprocess.check_call(
                ['runipy', ipy_file, '--html', ipy_file[:-5] + 'html'])
            # Successful command does not mean Successful notebook!  We should
            # start raising failures that will propagate here.
            self.assertEqual(status, 0)
            sys.path.pop()
def main():
    """Entry point: hand control to the unittest test runner."""
    unittest.main()

if __name__ == '__main__':
    main()
| {
"content_hash": "53682fa454674ee7e559a59ec1cb0e9f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 29.604166666666668,
"alnum_prop": 0.5327234342012667,
"repo_name": "rsignell-usgs/notebook",
"id": "9cf605502ce475a605cf657668f7f499cb406a3f",
"size": "1659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system-test/test/test_notebooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13956"
},
{
"name": "Jupyter Notebook",
"bytes": "202646883"
},
{
"name": "Python",
"bytes": "1476735"
}
],
"symlink_target": ""
} |
'''
run_client.py - An example client using the python socket
implementation of the Google Protocol Buffers.
This module is an executable script demonstrating the usage of the
python socket implementation of the Google Protocol Buffers. To work
correctly, the script requires a server to be running first
(i.e. run_server.py).
Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
Zach Walker (zwalker@lcogt.net)
Jan Dittberner (jan@dittberner.info)
May 2009, Nov 2010
'''
# Add main protobuf module to classpath
import sys
sys.path.append('../../main')
import time_pb2 as proto
from protobuf.socketrpc import RpcService
import logging
# Module-level logger for this example client.
log = logging.getLogger(__name__)

# Server address the example connects to; run_server.py must be listening here.
hostname = 'localhost'
port = 8090

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    log.debug("test")

    # Create request message
    request = proto.TimeRequest()

    service = RpcService(proto.TimeService_Stub, port, hostname)

    # Make a synchronous call; any RPC failure or timeout surfaces here.
    try:
        response = service.getTime(request, timeout=1000)
        log.info(response)
    except Exception as ex:  # 'as' form is valid on Python 2.6+ and 3.x
        log.exception(ex)
| {
"content_hash": "2a65378debe445c498de080baf90a9dd",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 26.441860465116278,
"alnum_prop": 0.712401055408971,
"repo_name": "4ntoine/protobuf-socket-rpc",
"id": "6f9806329888962739b858f995a2a0dc4c0a6203",
"size": "2309",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python/src/protobuf/socketrpc/examples/time/run_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41025"
},
{
"name": "Java",
"bytes": "172370"
},
{
"name": "Protocol Buffer",
"bytes": "17363"
},
{
"name": "Python",
"bytes": "149231"
}
],
"symlink_target": ""
} |
import mock
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
from oslo_config import cfg
from manila.db import base
from manila import exception
from manila.network.neutron import api as neutron_api
from manila.network.neutron import constants as neutron_constants
from manila import test
from manila.tests.db import fakes
from manila.tests import utils as test_utils
# Module-level handle to the global oslo.config configuration object.
CONF = cfg.CONF
class FakeNeutronClient(object):
    """Minimal stand-in for neutronclient's Client used by these tests.

    Create/update style calls echo the request body straight back so the
    tests can inspect what the API layer built; read-only calls are no-op
    stubs returning None.
    """

    # -- echo stubs: return the request body unchanged --------------------
    def create_port(self, body):
        return body

    def update_port(self, port_id, body):
        return body

    def create_network(self, body):
        return body

    def create_subnet(self, body):
        return body

    def create_router(self, body):
        return body

    def update_router(self, router_id, body):
        return body

    # -- no-op stubs: implicitly return None ------------------------------
    def delete_port(self, port_id):
        pass

    def show_port(self, port_id):
        pass

    def list_ports(self, **search_opts):
        pass

    def list_networks(self):
        pass

    def show_network(self, network_uuid):
        pass

    def show_subnet(self, subnet_uuid):
        pass

    def list_routers(self):
        pass

    def show_router(self, router_id):
        pass

    def add_interface_router(self, router_id, subnet_id, port_id):
        pass

    def list_extensions(self):
        pass
class NeutronclientTestCase(test.TestCase):
    """Tests for how neutron_api.API lazily builds its authenticated client."""

    def test_no_auth_obj(self):
        """Without a preset auth_obj, get_client constructs an
        AuthClientLoader, forwarding the deprecated DEFAULT-group admin
        credentials, then delegates to it."""
        mock_client_loader = self.mock_object(
            neutron_api.client_auth, 'AuthClientLoader')
        fake_context = 'fake_context'
        data = {
            'DEFAULT': {
                'neutron_admin_username': 'foo_username',
                'neutron_admin_password': 'foo_password',
                'neutron_admin_tenant_name': 'foo_tenant_name',
                'neutron_admin_auth_url': 'foo_auth_url',
            },
            'neutron': {
                'endpoint_type': 'foo_endpoint_type',
                'region_name': 'foo_region_name',
            }
        }
        self.client = None
        with test_utils.create_temp_config_with_opts(data):
            self.client = neutron_api.API()
            self.client.get_client(fake_context)

        # Loader must be built from the DEFAULT-group options exactly once.
        mock_client_loader.assert_called_once_with(
            client_class=neutron_api.clientv20.Client,
            exception_module=neutron_api.neutron_client_exc,
            cfg_group=neutron_api.NEUTRON_GROUP,
            deprecated_opts_for_v2={
                'username': data['DEFAULT']['neutron_admin_username'],
                'password': data['DEFAULT']['neutron_admin_password'],
                'tenant_name': data['DEFAULT']['neutron_admin_tenant_name'],
                'auth_url': data['DEFAULT']['neutron_admin_auth_url'],
            },
        )
        mock_client_loader.return_value.get_client.assert_called_once_with(
            self.client,
            fake_context,
            endpoint_type=data['neutron']['endpoint_type'],
            region_name=data['neutron']['region_name'],
        )

    def test_with_auth_obj(self):
        """When auth_obj is already set, get_client delegates to it directly
        instead of building a new loader."""
        fake_context = 'fake_context'
        data = {
            'neutron': {
                'endpoint_type': 'foo_endpoint_type',
                'region_name': 'foo_region_name',
            }
        }
        self.client = None
        with test_utils.create_temp_config_with_opts(data):
            self.client = neutron_api.API()
            self.client.auth_obj = type(
                'FakeAuthObj', (object, ), {'get_client': mock.Mock()})
            self.client.get_client(fake_context)

        self.client.auth_obj.get_client.assert_called_once_with(
            self.client,
            fake_context,
            endpoint_type=data['neutron']['endpoint_type'],
            region_name=data['neutron']['region_name'],
        )
class NeutronApiTest(test.TestCase):
    """Unit tests for manila.network.neutron.api.API.

    ``clientv20.Client`` is patched to return :class:`FakeNeutronClient`,
    whose create/update stubs echo the request body back — so most tests
    assert on the request dict the API layer constructed.
    """

    def setUp(self):
        super(NeutronApiTest, self).setUp()
        # Replace the DB base model and the real neutron client with fakes.
        self.mock_object(base, 'Base', fakes.FakeModel)
        self.mock_object(
            clientv20, 'Client', mock.Mock(return_value=FakeNeutronClient()))
        self.neutron_api = neutron_api.API()

    def test_create_api_object(self):
        # instantiate Neutron API object
        neutron_api_instance = neutron_api.API()

        # Verify results
        self.assertTrue(hasattr(neutron_api_instance, 'client'))
        self.assertTrue(hasattr(neutron_api_instance, 'configuration'))
        self.assertEqual('DEFAULT', neutron_api_instance.config_group_name)

    def test_create_api_object_custom_config_group(self):
        # Set up test data
        fake_config_group_name = 'fake_config_group_name'

        # instantiate Neutron API object
        obj = neutron_api.API(fake_config_group_name)
        obj.get_client(mock.Mock())

        # Verify results
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(hasattr(obj, 'client'))
        self.assertTrue(hasattr(obj, 'configuration'))
        self.assertEqual(
            fake_config_group_name, obj.configuration._group.name)

    def test_create_port_with_all_args(self):
        # Set up test data
        self.mock_object(self.neutron_api, '_has_port_binding_extension',
                         mock.Mock(return_value=True))
        port_args = {
            'tenant_id': 'test tenant', 'network_id': 'test net',
            'host_id': 'test host', 'subnet_id': 'test subnet',
            'fixed_ip': 'test ip', 'device_owner': 'test owner',
            'device_id': 'test device', 'mac_address': 'test mac',
            'security_group_ids': 'test group',
            'dhcp_opts': 'test dhcp',
        }

        # Execute method 'create_port'
        port = self.neutron_api.create_port(**port_args)

        # Verify results
        self.assertEqual(port_args['tenant_id'], port['tenant_id'])
        self.assertEqual(port_args['network_id'], port['network_id'])
        self.assertEqual(port_args['host_id'], port['binding:host_id'])
        self.assertEqual(port_args['subnet_id'],
                         port['fixed_ips'][0]['subnet_id'])
        self.assertEqual(port_args['fixed_ip'],
                         port['fixed_ips'][0]['ip_address'])
        self.assertEqual(port_args['device_owner'], port['device_owner'])
        self.assertEqual(port_args['device_id'], port['device_id'])
        self.assertEqual(port_args['mac_address'], port['mac_address'])
        self.assertEqual(port_args['security_group_ids'],
                         port['security_groups'])
        self.assertEqual(port_args['dhcp_opts'], port['extra_dhcp_opts'])
        self.neutron_api._has_port_binding_extension.assert_called_once_with()
        self.assertTrue(clientv20.Client.called)

    def test_create_port_with_required_args(self):
        # Set up test data
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}

        # Execute method 'create_port'
        port = self.neutron_api.create_port(**port_args)

        # Verify results
        self.assertEqual(port_args['tenant_id'], port['tenant_id'])
        self.assertEqual(port_args['network_id'],
                         port['network_id'])
        self.assertTrue(clientv20.Client.called)

    def test_create_port_with_additional_kwargs(self):
        # Set up test data
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net',
                     'binding_arg': 'foo'}

        # Execute method 'create_port'
        port = self.neutron_api.create_port(**port_args)

        # Verify results
        self.assertEqual(port_args['tenant_id'], port['tenant_id'])
        self.assertEqual(port_args['network_id'],
                         port['network_id'])
        self.assertEqual(port_args['binding_arg'],
                         port['binding_arg'])
        self.assertTrue(clientv20.Client.called)

    def test_create_port_with_host_id_no_binding_ext(self):
        # host_id requires the port-binding extension; without it the API
        # must refuse the request.
        self.mock_object(self.neutron_api, '_has_port_binding_extension',
                         mock.Mock(return_value=False))
        port_args = {
            'tenant_id': 'test tenant',
            'network_id': 'test net',
            'host_id': 'foohost'
        }

        self.assertRaises(exception.NetworkException,
                          self.neutron_api.create_port, **port_args)

    @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock())
    def test_create_port_exception(self):
        self.mock_object(
            self.neutron_api.client, 'create_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}

        # Execute method 'create_port'
        self.assertRaises(exception.NetworkException,
                          self.neutron_api.create_port,
                          **port_args)

        # Verify results
        self.assertTrue(neutron_api.LOG.exception.called)
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(self.neutron_api.client.create_port.called)

    @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock())
    def test_create_port_exception_status_409(self):
        # Set up test data: HTTP 409 maps to the quota-style exception.
        self.mock_object(
            self.neutron_api.client, 'create_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException(
                status_code=409)))
        port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'}

        # Execute method 'create_port'
        self.assertRaises(exception.PortLimitExceeded,
                          self.neutron_api.create_port,
                          **port_args)

        # Verify results
        self.assertTrue(neutron_api.LOG.exception.called)
        self.assertTrue(clientv20.Client.called)
        self.assertTrue(self.neutron_api.client.create_port.called)

    def test_delete_port(self):
        # Set up test data
        self.mock_object(self.neutron_api.client, 'delete_port')
        port_id = 'test port id'

        # Execute method 'delete_port'
        self.neutron_api.delete_port(port_id)

        # Verify results
        self.neutron_api.client.delete_port.assert_called_once_with(port_id)
        self.assertTrue(clientv20.Client.called)

    def test_list_ports(self):
        # Set up test data
        search_opts = {'test_option': 'test_value'}
        fake_ports = [{'fake port': 'fake port info'}]
        self.mock_object(
            self.neutron_api.client, 'list_ports',
            mock.Mock(return_value={'ports': fake_ports}))

        # Execute method 'list_ports'
        ports = self.neutron_api.list_ports(**search_opts)

        # Verify results
        self.assertEqual(fake_ports, ports)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_ports.assert_called_once_with(
            **search_opts)

    def test_show_port(self):
        # Set up test data
        port_id = 'test port id'
        fake_port = {'fake port': 'fake port info'}
        self.mock_object(
            self.neutron_api.client, 'show_port',
            mock.Mock(return_value={'port': fake_port}))

        # Execute method 'show_port'
        port = self.neutron_api.show_port(port_id)

        # Verify results
        self.assertEqual(fake_port, port)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_port.assert_called_once_with(port_id)

    def test_get_network(self):
        # Set up test data
        network_id = 'test network id'
        fake_network = {'fake network': 'fake network info'}
        self.mock_object(
            self.neutron_api.client, 'show_network',
            mock.Mock(return_value={'network': fake_network}))

        # Execute method 'get_network'
        network = self.neutron_api.get_network(network_id)

        # Verify results
        self.assertEqual(fake_network, network)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_network.assert_called_once_with(
            network_id)

    def test_get_subnet(self):
        # Set up test data
        subnet_id = 'fake subnet id'
        self.mock_object(
            self.neutron_api.client, 'show_subnet',
            mock.Mock(return_value={'subnet': {}}))

        # Execute method 'get_subnet'
        subnet = self.neutron_api.get_subnet(subnet_id)

        # Verify results
        self.assertEqual({}, subnet)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_subnet.assert_called_once_with(
            subnet_id)

    def test_get_all_network(self):
        # Set up test data
        fake_networks = [{'fake network': 'fake network info'}]
        self.mock_object(
            self.neutron_api.client, 'list_networks',
            mock.Mock(return_value={'networks': fake_networks}))

        # Execute method 'get_all_networks'
        networks = self.neutron_api.get_all_networks()

        # Verify results
        self.assertEqual(fake_networks, networks)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_networks.assert_called_once_with()

    def test_list_extensions(self):
        # Set up test data: the API indexes extensions by their name.
        extensions = [
            {'name': neutron_constants.PORTBINDING_EXT},
            {'name': neutron_constants.PROVIDER_NW_EXT},
        ]
        self.mock_object(
            self.neutron_api.client, 'list_extensions',
            mock.Mock(return_value={'extensions': extensions}))

        # Execute method 'list_extensions'
        result = self.neutron_api.list_extensions()

        # Verify results
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_extensions.assert_called_once_with()
        self.assertIn(neutron_constants.PORTBINDING_EXT, result)
        self.assertIn(neutron_constants.PROVIDER_NW_EXT, result)
        self.assertEqual(
            extensions[0], result[neutron_constants.PORTBINDING_EXT])
        self.assertEqual(
            extensions[1], result[neutron_constants.PROVIDER_NW_EXT])

    def test_create_network(self):
        # Set up test data
        net_args = {'tenant_id': 'test tenant', 'name': 'test name'}

        # Execute method 'network_create'
        network = self.neutron_api.network_create(**net_args)

        # Verify results
        self.assertEqual(net_args['tenant_id'], network['tenant_id'])
        self.assertEqual(net_args['name'], network['name'])
        self.assertTrue(clientv20.Client.called)

    def test_create_subnet(self):
        # Set up test data
        subnet_args = {
            'tenant_id': 'test tenant',
            'name': 'test name',
            'net_id': 'test net id',
            'cidr': '10.0.0.0/24',
        }

        # Execute method 'subnet_create'
        subnet = self.neutron_api.subnet_create(**subnet_args)

        # Verify results
        self.assertEqual(subnet_args['tenant_id'], subnet['tenant_id'])
        self.assertEqual(subnet_args['name'], subnet['name'])
        self.assertTrue(clientv20.Client.called)

    def test_create_router(self):
        # Set up test data
        router_args = {'tenant_id': 'test tenant', 'name': 'test name'}

        # Execute method 'router_create'
        router = self.neutron_api.router_create(**router_args)

        # Verify results
        self.assertEqual(router_args['tenant_id'], router['tenant_id'])
        self.assertEqual(router_args['name'], router['name'])
        self.assertTrue(clientv20.Client.called)

    def test_list_routers(self):
        # Set up test data
        fake_routers = [{'fake router': 'fake router info'}]
        self.mock_object(
            self.neutron_api.client, 'list_routers',
            mock.Mock(return_value={'routers': fake_routers}))

        # Execute method 'router_list'
        networks = self.neutron_api.router_list()

        # Verify results
        self.assertEqual(fake_routers, networks)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.list_routers.assert_called_once_with()

    def test_create_network_exception(self):
        # Set up test data
        net_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        self.mock_object(
            self.neutron_api.client, 'create_network',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'network_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.network_create,
            **net_args)

        # Verify results
        self.neutron_api.client.create_network.assert_called_once_with(
            {'network': net_args})
        self.assertTrue(clientv20.Client.called)

    def test_create_subnet_exception(self):
        # Set up test data
        subnet_args = {
            'tenant_id': 'test tenant',
            'name': 'test name',
            'net_id': 'test net id',
            'cidr': '10.0.0.0/24',
        }
        self.mock_object(
            self.neutron_api.client, 'create_subnet',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'subnet_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.subnet_create,
            **subnet_args)

        # Verify results: 'net_id' is renamed to 'network_id' and ip_version
        # defaults to 4 in the request the API builds.
        expected_data = {
            'network_id': subnet_args['net_id'],
            'tenant_id': subnet_args['tenant_id'],
            'cidr': subnet_args['cidr'],
            'name': subnet_args['name'],
            'ip_version': 4,
        }
        self.neutron_api.client.create_subnet.assert_called_once_with(
            {'subnet': expected_data})
        self.assertTrue(clientv20.Client.called)

    def test_create_router_exception(self):
        # Set up test data
        router_args = {'tenant_id': 'test tenant', 'name': 'test name'}
        self.mock_object(
            self.neutron_api.client, 'create_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'router_create'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_create,
            **router_args)

        # Verify results
        self.neutron_api.client.create_router.assert_called_once_with(
            {'router': router_args})
        self.assertTrue(clientv20.Client.called)

    def test_update_port_fixed_ips(self):
        # Set up test data
        port_id = 'test_port'
        fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]}

        # Execute method 'update_port_fixed_ips'
        port = self.neutron_api.update_port_fixed_ips(port_id, fixed_ips)

        # Verify results
        self.assertEqual(fixed_ips, port)
        self.assertTrue(clientv20.Client.called)

    def test_update_port_fixed_ips_exception(self):
        # Set up test data
        port_id = 'test_port'
        fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]}
        self.mock_object(
            self.neutron_api.client, 'update_port',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'update_port_fixed_ips'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.update_port_fixed_ips,
            port_id, fixed_ips)

        # Verify results
        self.neutron_api.client.update_port.assert_called_once_with(
            port_id, {'port': fixed_ips})
        self.assertTrue(clientv20.Client.called)

    def test_router_update_routes(self):
        # Set up test data
        router_id = 'test_router'
        routes = {
            'routes': [
                {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', },
            ],
        }

        # Execute method 'router_update_routes'
        router = self.neutron_api.router_update_routes(router_id, routes)

        # Verify results
        self.assertEqual(routes, router)
        self.assertTrue(clientv20.Client.called)

    def test_router_update_routes_exception(self):
        # Set up test data
        router_id = 'test_router'
        routes = {
            'routes': [
                {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', },
            ],
        }
        self.mock_object(
            self.neutron_api.client, 'update_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'router_update_routes'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_update_routes,
            router_id, routes)

        # Verify results
        self.neutron_api.client.update_router.assert_called_once_with(
            router_id, {'router': routes})
        self.assertTrue(clientv20.Client.called)

    def test_show_router(self):
        # Set up test data
        router_id = 'test router id'
        fake_router = {'fake router': 'fake router info'}
        self.mock_object(
            self.neutron_api.client, 'show_router',
            mock.Mock(return_value={'router': fake_router}))

        # Execute method 'show_router'
        port = self.neutron_api.show_router(router_id)

        # Verify results
        self.assertEqual(fake_router, port)
        self.assertTrue(clientv20.Client.called)
        self.neutron_api.client.show_router.assert_called_once_with(router_id)

    def test_router_add_interface(self):
        # Set up test data
        router_id = 'test port id'
        subnet_id = 'test subnet id'
        port_id = 'test port id'
        self.mock_object(self.neutron_api.client, 'add_interface_router')

        # Execute method 'router_add_interface'
        self.neutron_api.router_add_interface(router_id, subnet_id, port_id)

        # Verify results
        # NOTE(review): first argument below is port_id, but router_id holds
        # the identical string 'test port id', so the assertion still passes.
        # router_id was presumably intended — confirm before changing.
        self.neutron_api.client.add_interface_router.assert_called_once_with(
            port_id, {'subnet_id': subnet_id, 'port_id': port_id})
        self.assertTrue(clientv20.Client.called)

    def test_router_add_interface_exception(self):
        # Set up test data
        router_id = 'test port id'
        subnet_id = 'test subnet id'
        port_id = 'test port id'
        self.mock_object(
            self.neutron_api.client, 'add_interface_router',
            mock.Mock(side_effect=neutron_client_exc.NeutronClientException))

        # Execute method 'router_add_interface'
        self.assertRaises(
            exception.NetworkException,
            self.neutron_api.router_add_interface,
            router_id, subnet_id, port_id)

        # Verify results
        self.neutron_api.client.add_interface_router.assert_called_once_with(
            router_id, {'subnet_id': subnet_id, 'port_id': port_id})
        self.assertTrue(clientv20.Client.called)

    def test_admin_project_id_exist(self):
        fake_admin_project_id = 'fake_admin_project_id_value'
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock()
        self.neutron_api.client.httpclient.get_project_id = mock.Mock(
            return_value=fake_admin_project_id)

        admin_project_id = self.neutron_api.admin_project_id

        self.assertEqual(fake_admin_project_id, admin_project_id)
        # NOTE(review): bare attribute access below is a no-op, not an
        # assertion — it verifies nothing.
        self.neutron_api.client.httpclient.auth_token.called

    def test_admin_project_id_not_exist(self):
        fake_admin_project_id = 'fake_admin_project_id_value'
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock(
            return_value=None)
        self.neutron_api.client.httpclient.authenticate = mock.Mock()
        self.neutron_api.client.httpclient.get_project_id = mock.Mock(
            return_value=fake_admin_project_id)

        admin_project_id = self.neutron_api.admin_project_id

        self.assertEqual(fake_admin_project_id, admin_project_id)
        # NOTE(review): bare attribute accesses below are no-ops, not
        # assertions — they verify nothing.
        self.neutron_api.client.httpclient.auth_token.called
        self.neutron_api.client.httpclient.authenticate.called

    def test_admin_project_id_not_exist_with_failure(self):
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = None
        self.neutron_api.client.httpclient.authenticate = mock.Mock(
            side_effect=neutron_client_exc.NeutronClientException)
        self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock()

        try:
            self.neutron_api.admin_project_id
        except exception.NetworkException:
            pass
        else:
            raise Exception('Expected error was not raised')

        self.assertTrue(self.neutron_api.client.httpclient.authenticate.called)
        self.assertFalse(
            self.neutron_api.client.httpclient.auth_tenant_id.called)

    def test_get_all_admin_project_networks(self):
        fake_networks = {'networks': ['fake_net_1', 'fake_net_2']}
        self.mock_object(
            self.neutron_api.client, 'list_networks',
            mock.Mock(return_value=fake_networks))
        self.neutron_api.client.httpclient = mock.Mock()
        self.neutron_api.client.httpclient.auth_token = mock.Mock()
        self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock()

        networks = self.neutron_api.get_all_admin_project_networks()

        self.assertEqual(fake_networks['networks'], networks)
        # NOTE(review): bare attribute accesses below are no-ops, not
        # assertions — they verify nothing.
        self.neutron_api.client.httpclient.auth_token.called
        self.neutron_api.client.httpclient.auth_tenant_id.called
        self.neutron_api.client.list_networks.assert_called_once_with(
            tenant_id=self.neutron_api.admin_project_id, shared=False)
| {
"content_hash": "9e051ff9ae8964d05c3ff2f9bfb0fd62",
"timestamp": "",
"source": "github",
"line_count": 696,
"max_line_length": 79,
"avg_line_length": 36.75431034482759,
"alnum_prop": 0.603299323716821,
"repo_name": "bswartz/manila",
"id": "a6e59f83e3a30e6380af7029d6399336bb61c226",
"size": "26247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/network/neutron/test_neutron_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9952105"
},
{
"name": "Shell",
"bytes": "106606"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.http import HttpResponseBadRequest, HttpResponseForbidden, HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from golem.models import Alarm
def index_view(request):
    """Render the static landing page."""
    template_name = "index.html"
    return render_to_response(template_name)
def login_view(request):
    """Show the login form on GET; authenticate and start a session on POST.

    Returns 400 when the form fields are missing, 403 for bad credentials
    or an inactive account, and redirects to /main on success.
    """
    context = RequestContext(request)
    if request.method == "GET":
        return render_to_response("login.html", context_instance=context)

    try:
        username = request.POST['login-username']
        password = request.POST['login-password']
    except KeyError:
        return HttpResponseBadRequest("Need to include all required parameters: login-username, login-password")

    user = auth.authenticate(username=username, password=password)
    if user is None:
        return HttpResponseForbidden("Incorrect username or password")
    if not user.is_active:
        return HttpResponseForbidden("Your user is inactive")

    auth.login(request, user)
    return HttpResponseRedirect("/main")
def stop_alarm_view(request):
    """Publish an 'alarm_cancel' command on the local RabbitMQ 'commands' queue."""
    import pika as p
    conn = p.BlockingConnection(p.ConnectionParameters("localhost"))
    ch = conn.channel()
    ch.basic_publish(exchange='', routing_key='commands', body='alarm_cancel')
    conn.close()
    return HttpResponse("Alarm stopped")
def snooze_alarm_view(request):
    """Publish an 'alarm_snooze' command on the local RabbitMQ 'commands' queue."""
    import pika as p
    conn = p.BlockingConnection(p.ConnectionParameters("localhost"))
    ch = conn.channel()
    ch.basic_publish(exchange='', routing_key='commands', body='alarm_snooze')
    conn.close()
    return HttpResponse("Alarm snoozed")
@login_required
def main_view(request):
    """Render the main page with the user's recurring and one-off manual alarms."""
    # NOTE(review): a 1970 timestamp year appears to mark recurring alarms
    # (time-of-day only) - confirm this convention with the alarm daemon.
    recurring = Alarm.objects.filter(manual=True, time__year=1970)
    one_off = Alarm.objects.filter(manual=True).exclude(time__year=1970)
    ctx = RequestContext(request, {"recurring": recurring, "one_off": one_off})
    return render_to_response("main.html", context_instance=ctx)
@login_required
def logout_view(request):
    """End the current session and bounce the user back to the landing page."""
    auth.logout(request)
    return HttpResponseRedirect("/")
@login_required
def test_display_view(request):
    """Publish a 'displaytest' command so the clock hardware runs its display test."""
    import pika as p
    conn = p.BlockingConnection(p.ConnectionParameters("localhost"))
    ch = conn.channel()
    ch.basic_publish(exchange="", routing_key="commands", body="displaytest")
    conn.close()
    return HttpResponse()
| {
"content_hash": "48e251782b9d4af7803040748c5a2c29",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 107,
"avg_line_length": 30.301204819277107,
"alnum_prop": 0.7284294234592446,
"repo_name": "Tyler-Ward/GolemClock",
"id": "72eca96157032face3663c470d307aea1541c49a",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webface/golem/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "59252"
},
{
"name": "Python",
"bytes": "51747"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
} |
from django import template
from Instanssi.kompomaatti.models import Event
register = template.Library()
@register.inclusion_tag('admin_base/tags/event_nav_items.html')
def render_base_events_nav():
    """Supply every Event, newest date first, to the admin nav template."""
    events = Event.objects.all().order_by('-date')
    return {'events': events}
@register.simple_tag
def event_name(event_id):
    """Return the name of the Event with primary key ``event_id``.

    Resolves to an empty string when the event does not exist or the id is
    malformed, so templates render blank instead of raising.
    """
    try:
        return Event.objects.get(pk=event_id).name
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the tag safe without that.
        return ''
"content_hash": "189c49287ff14884c2dd8fefcd09e6d8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 25.294117647058822,
"alnum_prop": 0.6906976744186046,
"repo_name": "Instanssi/Instanssi.org",
"id": "8ee912c43bdeb3b4726ae4a805c3d479844e72cc",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Instanssi/admin_base/templatetags/admin_base_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68380"
},
{
"name": "HTML",
"bytes": "601649"
},
{
"name": "JavaScript",
"bytes": "793086"
},
{
"name": "Python",
"bytes": "457189"
},
{
"name": "SCSS",
"bytes": "375144"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
} |
from .boost import BoostDependency
from .cuda import CudaDependency
from .hdf5 import hdf5_factory
from .base import Dependency, InternalDependency, ExternalDependency, NotFoundDependency
from .base import (
ExternalLibrary, DependencyException, DependencyMethods,
BuiltinDependency, SystemDependency, get_leaf_external_dependencies)
from .cmake import CMakeDependency
from .configtool import ConfigToolDependency
from .dub import DubDependency
from .framework import ExtraFrameworkDependency
from .pkgconfig import PkgConfigDependency
from .factory import DependencyFactory
from .detect import find_external_dependency, get_dep_identifier, packages, _packages_accept_language
from .dev import (
ValgrindDependency, JNISystemDependency, JDKSystemDependency, gmock_factory, gtest_factory,
llvm_factory, zlib_factory)
from .coarrays import coarray_factory
from .mpi import mpi_factory
from .scalapack import scalapack_factory
from .misc import (
BlocksDependency, OpenMPDependency, cups_factory, curses_factory, gpgme_factory,
libgcrypt_factory, libwmf_factory, netcdf_factory, pcap_factory, python3_factory,
shaderc_factory, threads_factory, ThreadDependency, iconv_factory, intl_factory,
dl_factory, openssl_factory, libcrypto_factory, libssl_factory,
)
from .platform import AppleFrameworks
from .qt import qt4_factory, qt5_factory, qt6_factory
from .ui import GnuStepDependency, WxDependency, gl_factory, sdl2_factory, vulkan_factory
# Explicit public API of this package: star-imports and re-exports are
# limited to these names.
__all__ = [
    'Dependency',
    'InternalDependency',
    'ExternalDependency',
    'SystemDependency',
    'BuiltinDependency',
    'NotFoundDependency',
    'ExternalLibrary',
    'DependencyException',
    'DependencyMethods',
    'CMakeDependency',
    'ConfigToolDependency',
    'DubDependency',
    'ExtraFrameworkDependency',
    'PkgConfigDependency',
    'DependencyFactory',
    'ThreadDependency',
    'find_external_dependency',
    'get_dep_identifier',
    'get_leaf_external_dependencies',
]
"""Dependency representations and discovery logic.
Meson attempts to largely abstract away dependency discovery information, and
to encapsulate that logic itself so that the DSL doesn't have too much direct
information. There are some cases where this is impossible/undesirable, such
as the `get_variable()` method.
Meson has four primary dependency types:
1. pkg-config
2. apple frameworks
3. CMake
4. system
Plus a few more niche ones.
When a user calls `dependency('foo')` Meson creates a list of candidates, and
tries those candidates in order to find one that matches the criteria
provided by the user (such as version requirements, or optional components
that are required.)
Except to work around bugs or handle odd corner cases, pkg-config and CMake
generally just work™, though there are exceptions. Most of this package is
concerned with dependencies that don't (always) provide CMake and/or
pkg-config files.
For these cases one needs to write a `system` dependency. These dependencies
descend directly from `ExternalDependency`, in their constructor they
manually set up the necessary link and compile args (and additional
dependencies as necessary).
For example, imagine a dependency called Foo, it uses an environment variable
called `$FOO_ROOT` to point to its install root, which looks like this:
```txt
$FOO_ROOT
→ include/
→ lib/
```
To use Foo, you need its include directory, and you need to link to
`lib/libfoo.ext`.
You could write code that looks like:
```python
class FooSystemDependency(ExternalDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
root = os.environ.get('FOO_ROOT')
if root is None:
mlog.debug('$FOO_ROOT is unset.')
self.is_found = False
return
lib = self.clib_compiler.find_library('foo', environment, [os.path.join(root, 'lib')])
if lib is None:
mlog.debug('Could not find lib.')
self.is_found = False
return
self.compile_args.append(f'-I{os.path.join(root, "include")}')
self.link_args.append(lib)
self.is_found = True
```
This code will look for `FOO_ROOT` in the environment, handle `FOO_ROOT` being
undefined gracefully, then set its `compile_args` and `link_args` gracefully.
It will also gracefully handle not finding the required lib (hopefully that
doesn't happen, but it could if, for example, the lib is only static and
shared linking is requested).
There are a couple of things about this that still aren't ideal. For one, we
don't want to be reading random environment variables at this point. Those
should actually be added to `envconfig.Properties` and read in
`environment.Environment._set_default_properties_from_env` (see how
`BOOST_ROOT` is handled). We can also handle the `static` keyword and the
`prefer_static` built-in option. So now that becomes:
```python
class FooSystemDependency(ExternalDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
root = environment.properties[self.for_machine].foo_root
if root is None:
mlog.debug('foo_root is unset.')
self.is_found = False
return
get_option = environment.coredata.get_option
        static_opt = kwargs.get('static', get_option(Mesonlib.OptionKey('prefer_static')))
static = Mesonlib.LibType.STATIC if static_opt else Mesonlib.LibType.SHARED
lib = self.clib_compiler.find_library(
'foo', environment, [os.path.join(root, 'lib')], libtype=static)
if lib is None:
mlog.debug('Could not find lib.')
self.is_found = False
return
self.compile_args.append(f'-I{os.path.join(root, "include")}')
self.link_args.append(lib)
self.is_found = True
```
This is nicer in a couple of ways. First we can properly cross compile as we
are allowed to set `FOO_ROOT` for both the build and host machines, it also
means that users can override this in their machine files, and if that
environment variables changes during a Meson reconfigure Meson won't re-read
it, this is important for reproducibility. Finally, Meson will figure out
whether it should be finding `libfoo.so` or `libfoo.a` (or the platform
specific names). Things are looking pretty good now, so it can be added to
the `packages` dict below:
```python
packages.update({
'foo': FooSystemDependency,
})
```
Now, what if foo also provides pkg-config, but it's only shipped on Unices,
or only included in very recent versions of the dependency? We can use the
`DependencyFactory` class:
```python
foo_factory = DependencyFactory(
'foo',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
system_class=FooSystemDependency,
)
```
This is a helper function that will generate a default pkg-config based
dependency, and use the `FooSystemDependency` as well. It can also handle
custom finders for pkg-config and cmake based dependencies that need some
extra help. You would then add the `foo_factory` to packages instead of
`FooSystemDependency`:
```python
packages.update({
'foo': foo_factory,
})
```
If you have a dependency that is very complicated, (such as having multiple
implementations) you may need to write your own factory function. There are a
number of examples in this package.
_Note_ before we moved to factory functions it was common to use an
`ExternalDependency` class that would instantiate different types of
dependencies and hold the one it found. There are a number of drawbacks to
this approach, and no new dependencies should do this.
"""
# This is a dict where the keys should be strings, and the values must be one
# of:
# - An ExternalDependency subclass
# - A DependencyFactory object
# - A callable with a signature of (Environment, MachineChoice, Dict[str, Any]) -> List[Callable[[], ExternalDependency]]
packages.update({
    # From dev:
    'gtest': gtest_factory,
    'gmock': gmock_factory,
    'llvm': llvm_factory,
    'valgrind': ValgrindDependency,
    'zlib': zlib_factory,
    'jni': JNISystemDependency,
    'jdk': JDKSystemDependency,

    'boost': BoostDependency,
    'cuda': CudaDependency,

    # per-file
    'coarray': coarray_factory,
    'hdf5': hdf5_factory,
    'mpi': mpi_factory,
    'scalapack': scalapack_factory,

    # From misc:
    'blocks': BlocksDependency,
    'curses': curses_factory,
    'netcdf': netcdf_factory,
    'openmp': OpenMPDependency,
    'python3': python3_factory,
    'threads': threads_factory,
    'pcap': pcap_factory,
    'cups': cups_factory,
    'libwmf': libwmf_factory,
    'libgcrypt': libgcrypt_factory,
    'gpgme': gpgme_factory,
    'shaderc': shaderc_factory,
    'iconv': iconv_factory,
    'intl': intl_factory,
    'dl': dl_factory,
    'openssl': openssl_factory,
    'libcrypto': libcrypto_factory,
    'libssl': libssl_factory,

    # From platform:
    'appleframeworks': AppleFrameworks,

    # From ui:
    'gl': gl_factory,
    'gnustep': GnuStepDependency,
    'qt4': qt4_factory,
    'qt5': qt5_factory,
    'qt6': qt6_factory,
    'sdl2': sdl2_factory,
    'wxwidgets': WxDependency,
    'vulkan': vulkan_factory,
})
# Dependency names whose dependency() call accepts the 'language' keyword.
_packages_accept_language.update({
    'hdf5',
    'mpi',
    'netcdf',
    'openmp',
})
| {
"content_hash": "aba5c805cb1dbb253ef1c308d23e35f4",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 121,
"avg_line_length": 34.723247232472325,
"alnum_prop": 0.7184909670563231,
"repo_name": "mesonbuild/meson",
"id": "bbf31ad11cf8ce8ca9f2eac04dac118defb69ec3",
"size": "10004",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mesonbuild/dependencies/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5960"
},
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "C",
"bytes": "204306"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "59193"
},
{
"name": "CMake",
"bytes": "38429"
},
{
"name": "Cuda",
"bytes": "10592"
},
{
"name": "Cython",
"bytes": "1921"
},
{
"name": "D",
"bytes": "7840"
},
{
"name": "Fortran",
"bytes": "12248"
},
{
"name": "Genie",
"bytes": "476"
},
{
"name": "HTML",
"bytes": "897"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "3768"
},
{
"name": "JavaScript",
"bytes": "150"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "219"
},
{
"name": "Limbo",
"bytes": "28"
},
{
"name": "Meson",
"bytes": "601347"
},
{
"name": "Objective-C",
"bytes": "686"
},
{
"name": "Objective-C++",
"bytes": "378"
},
{
"name": "PowerShell",
"bytes": "4728"
},
{
"name": "Python",
"bytes": "4125854"
},
{
"name": "Roff",
"bytes": "625"
},
{
"name": "Rust",
"bytes": "4039"
},
{
"name": "Shell",
"bytes": "12539"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10033"
},
{
"name": "Verilog",
"bytes": "696"
},
{
"name": "Vim Script",
"bytes": "10684"
},
{
"name": "Yacc",
"bytes": "103"
}
],
"symlink_target": ""
} |
import math
import sys
import os
# import the finalization function
from orbit.utils import orbitFinalize
# import general accelerator elements and lattice
from orbit.lattice import AccLattice, AccNode, AccActionsContainer
from orbit.py_linac.lattice import MarkerLinacNode
import orbit_utils
from orbit_utils import bunch_utils_functions
from bunch_utils_functions import copyCoordsToInitCoordsAttr
from bunch_utils_functions import transportMtrxFromInitCoords
from orbit_utils import Matrix
class LinacTrMatrixGenNode(MarkerLinacNode):
	"""
	Linac Accelerator Nodes for Transport Matrices generation.
	These nodes are using the Initial Coordinates particles Attributes.
	Each node (if it is not the first one) calculates the transport matrix
	between the previous node and itself.
	The matrix is a 7x7 matrix that transforms the initial particles
	coordinates to the final ones that are in the bunch.
	"""
	def __init__(self, trMatricesController, name = "TrMatrixGen"):
		# NOTE(review): with the default argument this builds a name like
		# "TrMatrixGenTrMatrixGen:<n>"; "name = name+..." may have been the
		# intent - confirm before changing.
		if(name == "TrMatrixGen"):
			name += name+":"+str(trMatricesController.getCount())
		MarkerLinacNode.__init__(self,name)
		# controller keeping all matrix-generator nodes of the lattice
		self.trMatricesController = trMatricesController
		# index of this node in the controller; index 0 is the reference
		# node whose coordinates define the "initial" state
		self.trMtrxNode_ind = trMatricesController.getCount()
		# 0/1 flags: weight particles with Twiss parameters per plane
		self.use_twiss_weight_x = 0
		self.use_twiss_weight_y = 0
		self.use_twiss_weight_z = 0
		# sync-particle relativistic parameters, captured during track()
		self.relativistic_beta = 0.
		self.relativistic_gamma = 0.
		#--------------------------------------
		self.trMtrx = Matrix(7,7)
		#--------------------------------------
		self.trMatricesController.addNode(self)
	def setInternalIndex(self,ind):
		"""
		Sets the index of the TrMatrxGenNode in the controller
		"""
		self.trMtrxNode_ind = ind
	def getTrMatricesController(self):
		"""
		Returns the LinacTrMatricesContrioller that keeps the references to the TrMatrxGenNodes.
		"""
		return self.trMatricesController
	def getTwissWeightUse(self):
		"""
		Returns (use_x,use_y,use_z) tuple where use_{} == True means the Twiss weights will be used.
		"""
		res_arr = [True,True,True]
		if(self.use_twiss_weight_x == 0): res_arr[0] = False
		if(self.use_twiss_weight_y == 0): res_arr[1] = False
		if(self.use_twiss_weight_z == 0): res_arr[2] = False
		return tuple(res_arr)
	def setTwissWeightUse(self,use_twiss_weight_x,use_twiss_weight_y,use_twiss_weight_z):
		"""
		Sets (use_x,use_y,use_z) flags where True means the Twiss weights will be used.
		"""
		self.use_twiss_weight_x = 0
		self.use_twiss_weight_y = 0
		self.use_twiss_weight_z = 0
		if(use_twiss_weight_x == True): self.use_twiss_weight_x = 1
		if(use_twiss_weight_y == True): self.use_twiss_weight_y = 1
		if(use_twiss_weight_z == True): self.use_twiss_weight_z = 1
	def track(self, paramsDict):
		# Tracking entry point.  The first node (index 0) resets its matrix
		# to identity and stores every particle's coordinates as "initial"
		# attributes; later nodes fit the 7x7 matrix mapping those stored
		# coordinates to the current bunch coordinates.
		bunch = paramsDict["bunch"]
		self.relativistic_beta = bunch.getSyncParticle().beta()
		self.relativistic_gamma = bunch.getSyncParticle().gamma()
		if(self.trMtrxNode_ind == 0):
			self.trMtrx.unit()
			copyCoordsToInitCoordsAttr(bunch)
		else:
			transportMtrxFromInitCoords(bunch,self.trMtrx,self.use_twiss_weight_x,self.use_twiss_weight_y,self.use_twiss_weight_z)
	def trackDesign(self, paramsDict):
		"""
		This method does nothing for the aperture case.
		"""
		pass
	def getBeta(self):
		"""
		Returns relativistic beta at this node.
		"""
		return self.relativistic_beta
	def getGamma(self):
		"""
		Returns relativistic gamma at this node.
		"""
		return self.relativistic_gamma
	def getTransportMatrix(self):
		"""
		Return transport matrix (7x7).
		"""
		return self.trMtrx
	def getDetXYZ(self, trMtrx = None):
		"""
		Returns the determinants of the transformations in (x,y,z) directions.
		"""
		# 2x2 sub-determinants of the matrix blocks at rows/cols (0,1),
		# (2,3) and (4,5).
		if(trMtrx == None): trMtrx = self.trMtrx
		det_x = trMtrx.get(0,0)*trMtrx.get(1,1) - trMtrx.get(1,0)*trMtrx.get(0,1)
		det_y = trMtrx.get(0+2,0+2)*trMtrx.get(1+2,1+2) - trMtrx.get(1+2,0+2)*trMtrx.get(0+2,1+2)
		det_z = trMtrx.get(0+4,0+4)*trMtrx.get(1+4,1+4) - trMtrx.get(1+4,0+4)*trMtrx.get(0+4,1+4)
		return (det_x,det_y,det_z)
	def getNormDetXYZ(self):
		"""
		Returns the normalized determinants of the transformations in (x,y,z) directions.
		"""
		# Transverse determinants are scaled by the gamma*beta ratio of the
		# two end nodes, the longitudinal one by the beta ratio.
		(node0,node1) = self.getTwoNodes()
		beta_in = node0.getBeta()
		beta_out = node1.getBeta()
		gamma_in = node0.getGamma()
		gamma_out = node1.getGamma()
		gb_in = beta_in*gamma_in
		gb_out = beta_out*gamma_out
		(det_x,det_y,det_z) = self.getDetXYZ(self.trMtrx)
		return ((gb_out/gb_in)*det_x,(gb_out/gb_in)*det_y,(beta_in/beta_out)*det_z)
	def getTwoNodes(self):
		"""
		Returns two LinacTrMatrixGenNode nodes. The transport matrix is between these nodes.
		"""
		# NOTE(review): node0 is always the controller's node 0 here, while
		# printMatrix() uses index (self.trMtrxNode_ind - 1) as the start
		# node - confirm which convention is intended.
		node0 = self
		if(self.trMtrxNode_ind > 0):
			node0 = self.trMatricesController.getNode(0)
		node1 = self.trMatricesController.getNode(self.trMtrxNode_ind)
		return (node0,node1)
	def printMatrix(self):
		"""
		Print the matrix.
		"""
		name0 = "None"
		if(self.trMtrxNode_ind > 0):
			name0 = self.trMatricesController.getNode(self.trMtrxNode_ind-1).getName()
		name1 = self.trMatricesController.getNode(self.trMtrxNode_ind).getName()
		print "----Transport matrix--- from name0=",name0," to name1=",name1
		m = self.trMtrx
		for i in xrange(m.size()[0]):
			for j in xrange(m.size()[1]):
				print ("m(" + str(i) + "," + str(j)+")="+"%12.5g"%m.get(i,j) + " "),
			print ""
class LinacTrMatricesContrioller:
	"""
	LinacTrMatricesContrioller keeps the references to the LinacTrMatrixGenNode
	instances and attaches them to lattice nodes in position order.
	"""
	def __init__(self):
		# matrix-generator nodes, in lattice-position order after init()
		self.trMatrxNodes = []
	def getCount(self):
		"""Returns the number of registered TrMatrix nodes."""
		return len(self.trMatrxNodes)
	def getNode(self,ind):
		"""Returns the TrMatrix node with internal index ind."""
		return self.trMatrxNodes[ind]
	def addNode(self,trMatrxNode):
		"""Registers a LinacTrMatrixGenNode with this controller."""
		self.trMatrxNodes.append(trMatrxNode)
	def getNodes(self):
		"""Returns the list of all registered TrMatrix nodes."""
		return self.trMatrxNodes
	def init(self):
		"""Sorts the nodes by lattice position and renumbers their indices."""
		#--- place nodes in the right order
		self.trMatrxNodes = sorted(self.trMatrxNodes, key = lambda x: x.getPosition())
		for node_ind in range(len(self.trMatrxNodes)):
			self.trMatrxNodes[node_ind].setInternalIndex(node_ind)
	def addTrMatrxGenNodes(self, accLattice, node_or_nodes, place = MarkerLinacNode.ENTRANCE):
		"""
		Adds the LinacTrMatrixGenNode to the nodes as child nodes.
		node_or_nodes may be a single lattice node or a tuple/list of nodes.
		Returns the position-ordered list of created TrMatrix nodes.
		"""
		nodes = []
		if(type(node_or_nodes) in [tuple,list]):
			for node in node_or_nodes:
				nodes.append(node)
		else:
			nodes.append(node_or_nodes)
		#-----------------------------
		for node in nodes:
			trMatrxGenNode = LinacTrMatrixGenNode(self,node.getName()+":trMatrx")
			node.addChildNode(trMatrxGenNode,place)
		#----- set up the position of the TrMatrix nodes by tracking once
		actions = AccActionsContainer()
		def accNodeExitAction(paramsDict):
			"""
			Nonbound function. Sets the position of the TrMatrix nodes.
			"""
			node = paramsDict["node"]
			if(isinstance(node,LinacTrMatrixGenNode)):
				pos = paramsDict["path_length"]
				node.setPosition(pos)
		actions.addAction(accNodeExitAction, AccNode.EXIT)
		accLattice.trackActions(actions)
		self.init()
		return self.trMatrxNodes
	def addTrMatrxGenNodesAtEntrance(self, accLattice, node_or_node):
		"""
		Adds the LinacTrMatrixGenNode to the nodes as child nodes at the entrance.
		"""
		# Bug fix: this method used to reference the undefined name
		# "node_or_nodes" and raised NameError whenever it was called.
		return self.addTrMatrxGenNodes(accLattice, node_or_node, MarkerLinacNode.ENTRANCE)
	def addTrMatrxGenNodesAtExit(self, accLattice, node_or_node):
		"""
		Adds the LinacTrMatrixGenNode to the nodes as child nodes at the exit.
		"""
		# Bug fix: same undefined-name problem as the entrance variant.
		return self.addTrMatrxGenNodes(accLattice, node_or_node, MarkerLinacNode.EXIT)
| {
"content_hash": "9559ac516870e634db8b0d438512b0a6",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 121,
"avg_line_length": 31.69396551724138,
"alnum_prop": 0.6994424044607643,
"repo_name": "azukov/py-orbit",
"id": "754f970b6ec0d9937ea186c8b7a4eb4572697bc4",
"size": "7860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/orbit/py_linac/lattice/LinacTransportMatrixGenNodes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1754740"
},
{
"name": "Dockerfile",
"bytes": "232"
},
{
"name": "Makefile",
"bytes": "13194"
},
{
"name": "Python",
"bytes": "1025246"
},
{
"name": "Shell",
"bytes": "2982"
}
],
"symlink_target": ""
} |
from Framework.Resposta import Resposta
from Models.Campus.Campus import Campus as ModelCampus
class RespostaVer(Resposta):
	# Response wrapper for the campus "ver" (view/detail) endpoint: the body
	# is the given campus record wrapped in the Campus model.
	def __init__(self,campus):
		self.corpo = ModelCampus(campus)
| {
"content_hash": "b9f23b0e3496519c6aa3ec8a727aca0f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 27,
"alnum_prop": 0.7883597883597884,
"repo_name": "AEDA-Solutions/matweb",
"id": "a05cc78fd3b0e0f79287927b66a75e74ef82e38b",
"size": "189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/Models/Predio/RespostaVer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "475557"
},
{
"name": "HTML",
"bytes": "12097161"
},
{
"name": "JavaScript",
"bytes": "190487"
},
{
"name": "PHP",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "152996"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
} |
def verbing(s):
  """Return s + 'ing', or s + 'ly' when s already ends in 'ing'.

  Strings shorter than three characters come back unchanged.
  """
  if len(s) < 3:
    return s
  return s + ("ly" if s.endswith("ing") else "ing")
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not' ... 'bad' span with 'good'.

  The replacement happens only when a 'bad' actually appears after a
  'not'; otherwise s is returned unchanged.
  """
  n = s.find("not")
  b = s.find("bad")
  # str.find returns -1 on a miss.  The original only checked n < b, so a
  # string containing 'bad' but no 'not' (n == -1 < b) was mangled.
  if n != -1 and b != -1 and n < b:
    return s[:n] + "good" + s[b + 3:]
  return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  The front half of an odd-length string gets the extra character,
  e.g. 'abcde' splits into 'abc' + 'de'.
  """
  # Use floor division so the halves are ints on Python 3 too; the old
  # "len(a)/2" produced a float under Python 3 and broke the slicing.
  half_a = (len(a) + 1) // 2
  half_b = (len(b) + 1) // 2
  return a[:half_a] + b[:half_b] + a[half_a:] + b[half_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  # Exercise each function above with the course-provided cases; test()
  # prints one OK/X line per comparison.
  print 'verbing'
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print
  print 'not_bad'
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  print
  print 'front_back'
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')

# Run the self-checks only when executed as a script.
if __name__ == '__main__':
  main()
| {
"content_hash": "dca00828cfeb417df8359bb6813abd19",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 27.92207792207792,
"alnum_prop": 0.6223255813953489,
"repo_name": "chinbat/google-python-exercises",
"id": "bba058c560c5f04332da134a5e0d9bdb1ff7841f",
"size": "2664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/string2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "52568"
}
],
"symlink_target": ""
} |
import os
import json
import numpy as np
import scipy.sparse.csgraph
import pickle
import sys
import hulo_file.FileUtils as FileUtils
import hulo_param.ReconstructParam as ReconstructParam
import hulo_sfm.mergeSfM as mergeSfM
class sfmModel:
	# One video's structure-from-motion reconstruction: folder locations,
	# the list of reconstructed frame ids, and the thresholds used when
	# this model is merged with others.
	def __init__(self, name, imgFolLoc, csvFolLoc, matchesFolLoc, locFolLoc, sfm_dataLoc,
			validMergeRansacThres=-1, validMergeRansacThresK=-1, ransacStructureThres=-1, ransacStructureThresK=-1,
			mergeStructureThres=-1, mergeStructureThresK=-1):
		# Each threshold is supplied either directly (...Thres) or as a K
		# factor (...ThresK) applied to a median computed from sfm_data;
		# exactly one of each pair must be given or the process exits.
		if (validMergeRansacThres==-1 and validMergeRansacThresK==-1):
			print "error : invalid argument for sfmModel valid merge ransac"
			sys.exit()
		if (ransacStructureThres==-1 and ransacStructureThresK==-1):
			print "error : invalid argument for sfmModel structure ransac"
			sys.exit()
		if (mergeStructureThres==-1 and mergeStructureThresK==-1):
			print "error : invalid argument for sfmModel structure merge"
			sys.exit()
		self.name = name # folder name
		self.mergeOrder = name # structure similar to a tree specifying merge order
		self.imgFolLoc = imgFolLoc # folder dir of input image folder
		self.csvFolLoc = csvFolLoc # folder dir of csv folder
		self.matchesFolLoc = matchesFolLoc # folder of match folder with descriptor
		self.locFolLoc = locFolLoc # folder of localization result
		self.sfm_dataLoc = sfm_dataLoc # file dir of sfm_data.json
		# get list of reconstructed frames
		if self.sfm_dataLoc != "":
			sfm_data = FileUtils.loadjson(self.sfm_dataLoc)
			# a view counts as reconstructed when its pose id appears in
			# the extrinsics list
			extKeyTmp = [x["key"] for x in sfm_data["extrinsics"]]
			self.reconFrame = [x["value"]["ptr_wrapper"]["data"]["id_view"] for x in sfm_data["views"] if x["value"]["ptr_wrapper"]["data"]["id_pose"] in extKeyTmp]
			if validMergeRansacThresK>0:
				self.validMergeRansacThres = mergeSfM.findMedianThres(sfm_data, validMergeRansacThresK)
			else:
				self.validMergeRansacThres = validMergeRansacThres
			if ransacStructureThresK>0:
				self.ransacStructureThres = mergeSfM.findMedianStructurePointsThres(sfm_data, ransacStructureThresK)
			else:
				self.ransacStructureThres = ransacStructureThres
			if mergeStructureThresK>0:
				self.mergeStructureThres = mergeSfM.findMedianStructurePointsThres(sfm_data, mergeStructureThresK)
			else:
				self.mergeStructureThres = mergeStructureThres
	# update information in self with information from newInfo
	def update(self, newInfo):
		self.name = newInfo.name
		self.imgFolLoc = newInfo.imgFolLoc
		self.csvFolLoc = newInfo.csvFolLoc
		self.matchesFolLoc = newInfo.matchesFolLoc
		self.locFolLoc = newInfo.locFolLoc
		self.sfm_dataLoc = newInfo.sfm_dataLoc
		self.reconFrame = newInfo.reconFrame
		self.mergeOrder = newInfo.mergeOrder
		self.validMergeRansacThres = newInfo.validMergeRansacThres
		self.ransacStructureThres = newInfo.ransacStructureThres
		self.mergeStructureThres = newInfo.mergeStructureThres
class sfmGraph:
# receive a path
# if path is Input folder, list all folders as project
# if path is log file, load previous data
	def __init__(self, inputPath, outputPath, mInputPath, mSfMPath, mMatchesPath, mCsvPath, mInputImgPath,
			workspacePath, validMergeRansacThresK=5, ransacStructureThresK=10,
			mergeStructureThresK=0.01, minReconFrame=25):
		# When inputPath is a directory, every sub-folder is treated as one
		# video project and considered for the merge graph.
		self.sfmModel = [] # list of sfmModel objects for merging
		self.mSfMPath = mSfMPath # sfm path containing multiple folder of merged models
		self.mMatchesPath = mMatchesPath # matches path
		self.mCsvPath = mCsvPath # csv path
		self.mInputImgPath = mInputImgPath # input images path
		self.mInputPath = mInputPath
		self.nMergedModel = 0 # number of merged performed
		self.badMatches = [] # keeps pair of bad matches
		self.workspacePath = workspacePath
		# make sure all output folders for the merged model exist
		FileUtils.makedir(self.mSfMPath)
		FileUtils.makedir(self.mMatchesPath)
		FileUtils.makedir(self.mCsvPath)
		FileUtils.makedir(self.mInputImgPath)
		FileUtils.makedir(self.mInputPath)
		# list all folder as projects
		if os.path.isdir(inputPath):
			listDir = os.listdir(inputPath)
			for folder in sorted(listDir):
				# add model to self
				self.addModel(folder, os.path.join(inputPath,folder), os.path.join(outputPath,folder),
					minReconFrame, validMergeRansacThresK, ransacStructureThresK, mergeStructureThresK)
# add model to merge graph
# note that the ordering of the folders in input and
# output paths must be in correct format
#
# Input
# videoName : name of video
# inputPath : path to input folder (with inputImg and csv folders)
# outputPath ; path to output folder (with matches and SfM folders)
# minimumFrame : minimum number of frames used in reconstruction to be used for include
#
# Output
# added : boolean whether the video is added
	def addModel(self,videoName,inputPath,outputPath,minimumFrame,
			validMergeRansacThresK,ransacStructureThresK,mergeStructureThresK):
		# Validates a video project and, if complete, wraps it in an
		# sfmModel and appends it to self.sfmModel.  Returns True when the
		# video was added, False when it was skipped.
		# check whether all folders and files exists
		if (not os.path.isdir(inputPath)) or \
			(not os.path.isdir(os.path.join(inputPath,"inputImg"))) or \
			(not os.path.isdir(outputPath)) or \
			(not os.path.isdir(os.path.join(outputPath,"matches"))) or \
			(not os.path.isdir(os.path.join(outputPath,"SfM"))) or \
			(not os.path.isdir(os.path.join(outputPath,"SfM","reconstruction"))) or \
			(not os.path.isdir(os.path.join(outputPath,"SfM","reconstruction","global"))) or \
			(not os.path.isfile(os.path.join(outputPath,"SfM","reconstruction","global","sfm_data.json"))):
			print videoName + " is not a complete SfM project and will be ignored."
			return False
		# check if there is a video with the same name
		elif videoName in [x.name for x in self.sfmModel]:
			print "There exists other video with name \"" + videoName + "\", thus this video will be ignored."
			return False
		# generate sfmModel object
		newModel = sfmModel(
			videoName,
			os.path.join(inputPath,"inputImg"),
			os.path.join(inputPath,"csv"),
			os.path.join(outputPath,"matches"),
			os.path.join(outputPath,"loc"),
			os.path.join(outputPath,"SfM","reconstruction","global","sfm_data.json"),
			validMergeRansacThresK=validMergeRansacThresK,
			ransacStructureThresK=ransacStructureThresK,
			mergeStructureThresK=mergeStructureThresK)
		# check number of frame is above minimum
		if len(newModel.reconFrame) < minimumFrame:
			print "# of reconstructed frames (" + str(len(newModel.reconFrame)) +") is lower than threshold (" + \
				str(minimumFrame) + "), hence model " + newModel.name + " will not be included."
			return False
		print "Including " + videoName + " into merge order."
		self.sfmModel.append(newModel)
		return True
# calculate graph between all pairs of model
	def calcGraph(self):
		# Builds a complete graph over all models: every off-diagonal entry
		# is 1.0, i.e. every pair of videos is a merge candidate.  Returns
		# an (nModel x nModel) float32 adjacency matrix.
		print "Calculating graph edges between videos"
		nModel = len(self.sfmModel)
		graphEdges = np.zeros((nModel,nModel),dtype=np.float32)
		for i in range(0,nModel-1):
			for j in range(i+1,nModel):
				graphEdges[i,j] = 1.0
				graphEdges[j,i] = 1.0
		print "Complete calculating graph edges between videos"
		return graphEdges
# save result to file
def save(self, outputPath):
with open(outputPath,"wb") as output:
pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# load saved file
@staticmethod
def load(inputPath):
with open(inputPath,"rb") as inputFile:
data = pickle.load(inputFile)
return data
# merge one sfmModel to other (specifically, model 2 to model 1)
# all required folder will be created
# returns whether the merge is success, and merged sfmModel
def mergeOneModel(self, model1, model2, reconParam):
    """Attempt to merge model2 into model1.

    The heavy lifting is delegated to external executables via os.system:
    1. symlink model2's reconstructed frames into a temporary folder,
    2. localize those frames against model1,
    3. run mergeSfM.mergeModel to estimate a transform and merge structure,
    4. bundle-adjust and accept the merge only if enough localized frames
       agree with the transformed model (2016.06.20 condition below).

    Returns (True, merged sfmModel) on success, otherwise (False, an empty
    placeholder sfmModel).
    """
    sfmOutPath = os.path.join(self.mSfMPath,"global"+str(self.nMergedModel))
    # modified by T. IShihara 2016.06.14
    # fix file name too long issue
    #
    # create a temporary folder for reconstructed image of model2
    #inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmp"+model2.name)
    inputImgTmpFolder = os.path.join(self.mSfMPath,"inputImgTmp","inputImgTmpModel2")
    if os.path.isdir(inputImgTmpFolder):
        FileUtils.removedir(inputImgTmpFolder)
    # copy reconstructed image fom model2 to tmp folder
    sfm_data2 = FileUtils.loadjson(model2.sfm_dataLoc)
    if not os.path.isdir(inputImgTmpFolder):
        # keep only the views whose id is in model2's reconstructed frame list
        listReconFrameName = [sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["filename"] for x in range(0,len(sfm_data2["views"])) if sfm_data2["views"][x]["value"]["ptr_wrapper"]["data"]["id_view"] in model2.reconFrame]
        FileUtils.makedir(inputImgTmpFolder)
        # "cp -s" creates symlinks instead of copying the image data
        for reconFrameName in listReconFrameName:
            os.system("cp -s " + os.path.join(model2.imgFolLoc,reconFrameName) + " " + inputImgTmpFolder)
    # remove all old localization result
    FileUtils.removedir(model2.locFolLoc)
    FileUtils.makedir(model2.locFolLoc)
    # localize the images from model2 on model1
    guideMatchOption = ""
    if reconParam.bGuidedMatchingLocalize:
        guideMatchOption = " -gm"
    os.system(reconParam.LOCALIZE_PROJECT_PATH + \
              " " + inputImgTmpFolder + \
              " " + os.path.dirname(model1.sfm_dataLoc) + \
              " " + self.mMatchesPath + \
              " " + model2.locFolLoc + \
              " -f=" + str(reconParam.locFeatDistRatio) + \
              " -r=" + str(reconParam.locRansacRound) + \
              " -e=" + model2.csvFolLoc + \
              " -i=" + str(reconParam.locSkipFrame) + \
              guideMatchOption)
    # remove temporary image folder
    # removedir(inputImgTmpFolder)
    # extract centers from all json file and write to a file
    # (one "x y z 255 0 0" line per localized frame that carries a "t" vector)
    fileLoc = open(os.path.join(model2.locFolLoc,"center.txt"),"w")
    countLocFrame = 0
    for filename in sorted(os.listdir(model2.locFolLoc)):
        if filename[-4:]!="json":
            continue
        countLocFrame = countLocFrame + 1
        with open(os.path.join(model2.locFolLoc,filename)) as locJson:
            locJsonDict = json.load(locJson)
            if "t" in locJsonDict:
                loc = locJsonDict["t"]
                fileLoc.write(str(loc[0]) + " " + str(loc[1]) + " " +str(loc[2]) + " 255 0 0\n" )
    fileLoc.close()
    # get inlier matches
    FileUtils.makedir(sfmOutPath)
    resultSfMDataFile = os.path.join(sfmOutPath,"sfm_data.json")
    # below also checks if the ratio between first and last svd of M[0:3,0:3]
    # is good or not. If not then reject
    # TODO : revisit ransacRound parameter, use number of reconstruction frame to determine structure points transform seems small
    nMatchPointsTmp, nInlierTmp, M = mergeSfM.mergeModel(model1.sfm_dataLoc,
                                                         model2.sfm_dataLoc,
                                                         model2.locFolLoc,
                                                         resultSfMDataFile,
                                                         ransacThres=model1.ransacStructureThres,
                                                         mergePointThres=model1.mergeStructureThres,
                                                         ransacRoundMul=reconParam.ransacRoundMul,
                                                         inputImgDir=self.mInputImgPath,
                                                         minLimit=reconParam.min3DnInliers)
    ratioInlierMatchPoints = 0.0
    if nMatchPointsTmp>0:
        ratioInlierMatchPoints = float(nInlierTmp)/nMatchPointsTmp
    # 3. perform test whether merge is good
    sfm_merge_generated = True
    countFileAgree = 0
    countFileLoc = 1
    if os.path.isfile(resultSfMDataFile):
        os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + resultSfMDataFile + " " + resultSfMDataFile)
        countFileLoc, countFileAgree = mergeSfM.modelMergeCheckLocal(resultSfMDataFile, model2.locFolLoc, model1.validMergeRansacThres)
    else:
        sfm_merge_generated = False
    ratioAgreeFrameReconFrame = 0.0
    if (len(model2.reconFrame)>0):
        ratioAgreeFrameReconFrame = float(countFileAgree)/len(model2.reconFrame)
    ratioAgreeFrameLocFrame = 0.0
    if (countFileLoc>0):
        ratioAgreeFrameLocFrame = float(countFileAgree)/countFileLoc
    # write log file
    with open(os.path.join(self.mSfMPath,"global"+str(self.nMergedModel),"log.txt"),"a") as filelog:
        filelog.write(("M1: " + model1.name + "\n" + \
                       "M2: " + model2.name + "\n" + \
                       "nMatchedPoints: " + str(nMatchPointsTmp) + "\n" + \
                       "nInliers: " + str(nInlierTmp) + "\n" + \
                       "ratioInlierWithMatchedPoints: " + str(ratioInlierMatchPoints) + "\n" + \
                       "countLocFrame: " + str(countLocFrame) + "\n" + \
                       "nReconFrame M2: " + str(len(model2.reconFrame)) + "\n" + \
                       "countFileAgree: " + str(countFileAgree) + "\n" + \
                       "countFileLoc: " + str(countFileLoc) + "\n" + \
                       "not sfm_merge_generated: " + str(not sfm_merge_generated) + "\n" + \
                       # obsolete condition by T. Ishihara 2015.11.10
                       #"nInlierTmp > "+str(reconParam.vldMergeRatioInliersFileagree)+"*countFileAgree: " + str(nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree) + "\n" + \
                       "countFileAgree > "+str(reconParam.vldMergeMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeMinCountFileAgree) + "\n" + \
                       # obsolete condition by T. Ishihara 2016.04.02
                       #"countFileAgree > "+str(reconParam.vldMergeSmallMinCountFileAgree)+": " + str(countFileAgree > reconParam.vldMergeSmallMinCountFileAgree) + "\n" + \
                       # obsolete condition by T. Ishihara 2016.04.02
                       #"countFileLoc < countFileAgree*" +str(reconParam.vldMergeShortRatio)+ ": " + str(countFileLoc < countFileAgree*reconParam.vldMergeShortRatio) + "\n" + \
                       "ratioLocAgreeWithReconFrame: " + str(ratioAgreeFrameReconFrame) + "\n" + \
                       "ratioLocAgreeWithReconFrame > " + str(reconParam.vldMergeRatioAgrFReconF) + ": " + str(ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) + "\n" + \
                       "ratioLocAgreeWithLocFrame: " + str(ratioAgreeFrameLocFrame) + "\n" + \
                       "ratioLocAgreeWithLocFrame > " + str(reconParam.vldMergeRatioAgrFLocF) + ": " + str(ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF) + "\n" + \
                       str(M) + "\n\n"))
    # rename the localization folder to save localization result
    '''
    if os.path.isdir(model2.locFolLoc+model1.name):
        FileUtils.removedir(model2.locFolLoc+model1.name)
    os.rename(model2.locFolLoc,model2.locFolLoc+model1.name)
    '''
    # obsolete merge condition
    '''
    if not sfm_merge_generated or \
        not (nInlierTmp > reconParam.vldMergeRatioInliersFileagree*countFileAgree and \
             ((countFileAgree > reconParam.vldMergeMinCountFileAgree or (countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and countFileLoc < countFileAgree*reconParam.vldMergeShortRatio)) and \
             ((nInlierTmp > reconParam.vldMergeNInliers and float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconFNInliers) or float(countFileAgree)/countFileLoc > reconParam.vldMergeRatioAgrFLocF) and
             (float(countFileAgree)/len(model2.reconFrame) > reconParam.vldMergeRatioAgrFReconF))):
    '''
    # update merge condition by T. Ishihara 2015.11.10
    '''
    if not sfm_merge_generated or \
        not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
             countFileAgree > reconParam.vldMergeSmallMinCountFileAgree and \
             countFileLoc < countFileAgree*reconParam.vldMergeShortRatio and \
             ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
              ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
             ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
    '''
    # update merge condition by T. Ishihara 2016.04.02
    '''
    if not sfm_merge_generated or \
        not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
             ((nInlierTmp > reconParam.vldMergeNInliers and ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconFNInliers) or \
              ratioAgreeFrameReconFrame > reconParam.vldMergeRatioAgrFReconF) and \
             ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF):
    '''
    # update merge condition by T. Ishihara 2016.06.09
    '''
    if not sfm_merge_generated or \
        not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
             ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
             nInlierTmp > reconParam.min3DnInliers and \
             ratioInlierMatchPoints > reconParam.vldMergeRatioInliersMatchPoints):
    '''
    # update merge condition by T. Ishihara 2016.06.20
    if not sfm_merge_generated or \
        not (countFileAgree > reconParam.vldMergeMinCountFileAgree and \
             ratioAgreeFrameLocFrame > reconParam.vldMergeRatioAgrFLocF and \
             nInlierTmp > reconParam.min3DnInliers):
        print "Transformed locations do not agree with localization. Skip merge between " + model1.name + " and " + model2.name + "."
        '''
        if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
            os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                os.path.join(sfmOutPath,"sfm_data_("+model1.name + "," + model2.name+").json"))
        '''
        # keep the failed merge output for later inspection
        if os.path.isfile(os.path.join(sfmOutPath,"sfm_data.json")):
            os.rename(os.path.join(sfmOutPath,"sfm_data.json"), \
                os.path.join(sfmOutPath,"sfm_data_fail_merge.json"))
        # move to next video
        return False, sfmModel("","","","","","",validMergeRansacThres=0,validMergeRansacThresK=0,
                               ransacStructureThres=0, ransacStructureThresK=0,
                               mergeStructureThres=0, mergeStructureThresK=0)
    # generate colorized before bundle adjustment for comparison
    os.system("openMVG_main_ComputeSfM_DataColor " +
              " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
              " -o " + os.path.join(sfmOutPath,"colorized_pre.ply"))
    # TODO : try computing structure from know pose here
    # https://github.com/openMVG/openMVG/issues/246
    # http://openmvg.readthedocs.io/en/latest/software/SfM/ComputeStructureFromKnownPoses/
    # TODO : revisit the order of bundle adjustment
    # perform bundle adjustment
    '''
    os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
        " -c=" + "rs,rst,rsti" + " -r=" + "1")
    '''
    os.system(reconParam.BUNDLE_ADJUSTMENT_PROJECT_PATH + " " + os.path.join(sfmOutPath,"sfm_data.json") + " " + os.path.join(sfmOutPath,"sfm_data.json") + \
        " -c=" + "rst,rsti" + " -r=" + "1")
    os.system("openMVG_main_ComputeSfM_DataColor " +
              " -i " + os.path.join(sfmOutPath,"sfm_data.json") +
              " -o " + os.path.join(sfmOutPath,"colorized.ply"))
    # merged model name is "A<m1>,<m2>Z"; thresholds are inherited from model1
    return True, sfmModel("A" + model1.name + "," + model2.name +"Z", self.mInputImgPath, self.mCsvPath,
                          self.mMatchesPath, os.path.join(sfmOutPath,"loc"), resultSfMDataFile,
                          validMergeRansacThres=model1.validMergeRansacThres,
                          ransacStructureThres=model1.ransacStructureThres,
                          mergeStructureThres=model1.mergeStructureThres)
def isBadMatch(self, video1, video2):
    """Return True if localization between these two videos failed before.

    Bad matches are stored as [name, name] pairs in ``self.badMatches``;
    both orderings are checked so callers can skip the pair without
    redoing localization.
    """
    pair = [video1.name, video2.name]
    return pair in self.badMatches or list(reversed(pair)) in self.badMatches
# clear history of bad matches
def clearBadMatches(self):
    """Forget all previously recorded failed merge pairs so they may be retried."""
    self.badMatches = []
# perform merging model
# Input
#   image_descFile : path to image_describer.txt
def mergeModel(self, image_descFile, inputPath, outputPath, reconParam=ReconstructParam):
    """Repeatedly merge connected sfmModels until no mergable pair remains.

    Setup: symlink every video's images, CSVs and feature files into the
    shared merge folders.  Main loop: pick the base video with the fewest
    reconstructed frames that still has graph neighbours, try to merge each
    neighbour (smallest first, always merging the smaller model into the
    larger).  Failed pairs go to ``self.badMatches``; a successful merge
    shrinks the model list and graph and restarts the candidate search.

    NOTE(review): this method ends at the boundary of the visible chunk --
    confirm against the full file that nothing follows the final swap-back.
    """
    print "Begin merging models"
    FileUtils.makedir(self.mInputImgPath)
    FileUtils.makedir(self.mCsvPath)
    FileUtils.makedir(self.mMatchesPath)
    FileUtils.makedir(self.mSfMPath)
    # create symbolic links to all images, csv, and descriptor/feature files
    os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","inputImg","*") + " " + self.mInputImgPath)
    os.system("cp --remove-destination -s " + os.path.join(inputPath,"*","csv","*") + " " + self.mCsvPath)
    os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.desc") + " " + self.mMatchesPath)
    os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.feat") + " " + self.mMatchesPath)
    os.system("cp --remove-destination -s " + os.path.join(outputPath,"*","matches","*.bow") + " " + self.mMatchesPath)
    # copy image_describer.txt
    os.system("cp --remove-destination " + image_descFile + " " + self.mMatchesPath)
    listLead = range(0,len(self.sfmModel)) # list of model indexes which can initiate merge (list of model indexes which did not fail merge yet)
    listBye = [] # list of model indexes which will not be used to initiate merge (list of model indexes which already failed merge)
    baseVideo = -1
    mergeCandidatesRemainsForBaseVideo = True
    calcGraphEdges = False
    while True:
        # update model indexes which are not used to initiate merge
        if not mergeCandidatesRemainsForBaseVideo:
            listBye.append(self.sfmModel[baseVideo].name)
            listName = [(x,self.sfmModel[x].name) for x in range(0,len(self.sfmModel))]
            listLead = [x[0] for x in listName if x[1] not in listBye]
        # if there was a merge, recalculate the cooccurence graph
        if mergeCandidatesRemainsForBaseVideo:
            # calculate cooccurence graph
            if not calcGraphEdges:
                graphEdges = self.calcGraph()
                calcGraphEdges = True
        print "graph edges : " + str(graphEdges)
        print "SfM model names : " + str([x.name for x in self.sfmModel])
        connectionGraph = (graphEdges > 0.0)
        # calculate connected component on graph
        ccLabel = scipy.sparse.csgraph.connected_components(
            connectionGraph,
            directed=False)[1]
        # if nore more mergable components
        # (every node in its own component => nothing left to merge)
        if len(np.unique(ccLabel)) == len(ccLabel):
            print "No more mergable components. Exiting."
            return
        # sort the length of reconstructed frames in each video
        # from small to large to find the base Video
        reconFrameLenList = [len(self.sfmModel[i].reconFrame) for i in range(0,len(self.sfmModel))]
        reconFrameLenIdx = [x[0] for x in sorted(enumerate(reconFrameLenList), key=lambda y:y[1])]
        # find first base video that has a connected component
        baseVideo = ""
        for video in reconFrameLenIdx:
            if np.sum(ccLabel==ccLabel[video]) > 1 and video in listLead:
                baseVideo = video
                break
        # this should never be called since program should exit
        # if there is no connected components in grap
        if baseVideo == "":
            print "Cannot find connected component to merge. Exiting."
            return
        # get videos that connect to this baseVideo
        # and sort the from smallest to largest as merge order
        neighborVec = np.where(connectionGraph[baseVideo,:])[0]
        neighborVec = neighborVec[neighborVec!=baseVideo] # prevent selecting itself to merge
        mergeCandidate = neighborVec.tolist()
        nReconFrameMergeCand = [len(self.sfmModel[x].reconFrame) for x in mergeCandidate]
        orderMergeCand = [x[0] for x in sorted(enumerate(nReconFrameMergeCand), key=lambda y:y[1])]
        mergeCandidateModel = [self.sfmModel[mergeCandidate[i]] for i in orderMergeCand]
        mergedModel = self.sfmModel[baseVideo]
        print "Based model: " + mergedModel.name
        print "To merge with: " + str([x.name for x in mergeCandidateModel])
        mergeCandidatesRemainsForBaseVideo = False
        for video in mergeCandidateModel:
            # check if failed localization has been performed on this pair before
            # if so, skip this localization
            if self.isBadMatch(video,mergedModel):
                continue
            # swap order so small model is merged to larger model
            swap = False
            if len(mergedModel.reconFrame) < len(video.reconFrame):
                tmp = mergedModel
                mergedModel = video
                video = tmp
                swap = True
            # attempt merge
            mergeResult, mergedModelTmp = self.mergeOneModel(mergedModel,video,reconParam)
            if mergeResult:
                mergedModel.update(mergedModelTmp)
                videoIdx = self.sfmModel.index(video)
                del self.sfmModel[videoIdx]
                # update graph (drop the merged video's row and column)
                graphEdges = np.delete(graphEdges,videoIdx,0)
                graphEdges = np.delete(graphEdges,videoIdx,1)
                self.nMergedModel = self.nMergedModel+1
                self.save(os.path.join(self.mSfMPath,"global" + str(self.nMergedModel-1),"mergeGraph.txt"))
                self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                mergeCandidatesRemainsForBaseVideo = True
                # reset listBye to allow small model to merge to new large model
                listBye = []
                # write result log file
                with open(os.path.join(self.mSfMPath,"logRecon.txt"),"a") as outLogFile:
                    outLogFile.write(str(self.nMergedModel-1) + " " + mergedModel.name + "\n")
                # start again
                break
            else:
                # add to bad matches
                self.badMatches.append([video.name,mergedModel.name])
                # save
                self.save(os.path.join(self.mSfMPath,"mergeGraph.txt"))
                if swap:
                    # swap back if not merged
                    mergedModel = video
| {
"content_hash": "18b53627ec6c037d48222e49edff3ff5",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 231,
"avg_line_length": 53.14936247723133,
"alnum_prop": 0.5944343534733884,
"repo_name": "hulop/SfMLocalization",
"id": "5ffead8df0054c56b1060f5906d8b4889f53d281",
"size": "30461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyVisionLocalizeCommon/src/hulo_sfm/sfmMergeGraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "249756"
},
{
"name": "CMake",
"bytes": "4101"
},
{
"name": "CSS",
"bytes": "1279"
},
{
"name": "HTML",
"bytes": "82679"
},
{
"name": "JavaScript",
"bytes": "30698"
},
{
"name": "Python",
"bytes": "391566"
}
],
"symlink_target": ""
} |
from flask_login import LoginManager
from valsketch.models import User
# Flask-Login setup: unauthenticated users are redirected to the sign-in view
# and shown a flash message with the "warning" category.
login_manager = LoginManager()
login_manager.login_view = "views.signin"
login_manager.login_message_category = "warning"


@login_manager.user_loader
def load_user(user_id):
    """Reload a user from the session for Flask-Login.

    Flask-Login stores the user id in the session as a string.  Return the
    matching ``User``, or ``None`` (as the user_loader contract requires)
    when the id is missing or not a valid integer, instead of raising and
    breaking every request with a stale session cookie.
    """
    try:
        return User.query.get(int(user_id))
    except (TypeError, ValueError):
        return None
| {
"content_hash": "39c748844529cb8f3601bffd1ba13a5e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 48,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7750759878419453,
"repo_name": "val-iisc/sketch-parse",
"id": "002d69932a2249fd774276badca24e0a96cc9c19",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "valsketch/valsketch/extensions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "198387"
},
{
"name": "CSS",
"bytes": "4314"
},
{
"name": "HTML",
"bytes": "13333"
},
{
"name": "JavaScript",
"bytes": "408218"
},
{
"name": "Jupyter Notebook",
"bytes": "532326"
},
{
"name": "M",
"bytes": "129"
},
{
"name": "MATLAB",
"bytes": "402816"
},
{
"name": "Python",
"bytes": "131691"
}
],
"symlink_target": ""
} |
import os.path
import json
import tornado.web
import drift.session as session
import drift.processor as processor
class SessionHandler(tornado.web.RequestHandler):
    """Serve a stored session as HTML, JSON or CSV, and re-align it on PATCH."""

    def initialize(self, db, pool, blob_store, gentle_client):
        # Collaborators are injected through the tornado routing table.
        self.db = db
        self.pool = pool
        self.gentle_client = gentle_client
        self.blob_store = blob_store

    def get(self, path):
        id, ext = os.path.splitext(path)
        try:
            sess = self.db.get_session(id)
        except KeyError:
            # Fix: was `except KeyError, e:` -- Python 2-only syntax whose
            # bound exception object was never used.  Unknown id -> 404.
            raise tornado.web.HTTPError(404)
        respJSON = '{"session": %s}' % session.marshal_json(sess)
        if ext == '':
            return self.render('../templates/view.html', data=respJSON)
        elif ext == '.json':
            # HACK(maxhawkins): This sideload parameter is a temporary measure.
            # Later these heavy resources should be stored separately
            # from the session object. This will allow more efficient
            # storage and caching. For now, though, we can save bandwidth
            # by not sending them unless they're explicitly requested.
            to_sideload = self.get_arguments('sideload')
            if 'freq_hz' in sess and 'freq_hz' not in to_sideload:
                del sess['freq_hz']
            if 'waveform' in sess and 'waveform' not in to_sideload:
                del sess['waveform']
            if 'transcript' in sess and 'transcript' not in to_sideload:
                del sess['transcript']
            respJSON = '{"session": %s}' % session.marshal_json(sess)
            self.set_header('Content-Type', 'text/json')
            return self.write(respJSON)
        elif ext == '.csv':
            self.set_header('Content-Type', 'text/csv')
            self.set_header('Content-Disposition', 'attachment; filename=%s.csv' % id)
            return self.write(session.marshal_csv(sess))

    def patch(self, path):
        # TODO(maxhawkins): maybe just make sessions immutable and cache
        # transcoding results instead?
        id, ext = os.path.splitext(path)
        sess = self.db.get_session(id)
        if sess['status'] != 'DONE':
            self.set_status(400)
            return self.write('session status must be "DONE" before aligning')
        body = json.loads(self.request.body)
        transcript = body['transcript']
        sess['status'] = 'ALIGNING'
        self.db.save_session(sess)
        # Alignment is slow: run it on the worker pool and persist the result
        # when the callback fires.
        self.pool.apply_async(
            processor.transcribe,
            (self.gentle_client, self.blob_store, sess, transcript),
            callback=self.db.save_session)
        return self.write(session.marshal_json(sess))
class UploadHandler(tornado.web.RequestHandler):
    """Accept a media upload, store the raw blob and queue background processing."""

    def initialize(self, blob_store, db, pool):
        self.blob_store = blob_store
        self.db = db
        self.pool = pool

    def get(self):
        self.render("../templates/upload.html")

    def post(self):
        uploaded = self.request.files['file'][0]
        blob_id = self.blob_store.put(uploaded['body'])
        new_sess = session.new()
        new_sess['original_id'] = blob_id
        new_sess['name'] = uploaded['filename']
        new_sess['status'] = 'PROCESSING'
        self.db.save_session(new_sess)
        # Process asynchronously; the callback persists the updated session.
        self.pool.apply_async(
            processor.process,
            (new_sess, self.blob_store),
            callback=self.db.save_session)
        return self.write(session.marshal_json(new_sess))
class BlobHandler(tornado.web.StaticFileHandler):
    """Serve stored blobs by key, forcing a download via Content-Disposition."""

    def get(self, path, include_body=True):
        blob_key, _extension = os.path.splitext(path)
        self.set_header('Content-Disposition', 'attachment')
        return tornado.web.StaticFileHandler.get(self, blob_key, include_body)
| {
"content_hash": "b5d8f12fef19fc268bc0edd7bca251fe",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 86,
"avg_line_length": 37.16161616161616,
"alnum_prop": 0.6023375917368851,
"repo_name": "maxhawkins/drift",
"id": "4f3083be7fadf4a4a8ca4023e311c5ccaa41163a",
"size": "3679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drift/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10234"
},
{
"name": "CSS",
"bytes": "3890"
},
{
"name": "HTML",
"bytes": "1014"
},
{
"name": "JavaScript",
"bytes": "28977"
},
{
"name": "Makefile",
"bytes": "938"
},
{
"name": "Python",
"bytes": "48919"
}
],
"symlink_target": ""
} |
from PIL import Image
import sys
import os
import numpy as np
import argparse

# CLI: one input image, optional target format (NetCDF by default).
parser = argparse.ArgumentParser(
    description="Convert image to grayscale HDF5 or NetCDF.")
parser.add_argument(
    'input', help="Input filename")
parser.add_argument(
    '-f', '--format', action='store', default='nc',
    choices=['nc', 'h5', 'txt'], help="Choose the target output format")
args = parser.parse_args()

# Output dataset/variable name = input basename without extension.
name = os.path.splitext(os.path.basename(args.input))[0]

# Load as 8-bit grayscale ('L') and normalise pixel values to [0, 1].
img = Image.open(args.input).convert('L')
data = np.array(img.getdata()).astype('float32')
data /= data.max()
# PIL reports size as (width, height); numpy wants (rows, cols), hence the
# shape[::-1] reversals below.
shape = img.size

if args.format == 'h5':
    import h5py as h5
    print("Converting {} {} to {}.h5".format(args.input, shape, name),
          file=sys.stderr)
    outfile = h5.File('{}.h5'.format(name), 'w')
    dataset = outfile.create_dataset(name, shape[::-1], dtype='f')
    dataset[:] = data.reshape(shape[::-1])
    outfile.close()
    sys.exit()

if args.format == 'nc':
    from netCDF4 import Dataset
    print("Converting {} to {}.nc".format(args.input, name), file=sys.stderr)
    root = Dataset('{}.nc'.format(name), 'w', format='NETCDF4')
    root.createDimension('x', shape[0])
    root.createDimension('y', shape[1])
    var = root.createVariable(name, 'f4', ('y', 'x'), zlib=True, complevel=9)
    var[:] = data
    root.close()
    sys.exit()

if args.format == 'txt':
    print("Converting {} to standard output".format(args.input),
          file=sys.stderr)
    # BUG FIX: ndarray.reshape returns a *new* array; the original discarded
    # the result, so savetxt wrote a flat 1-D column instead of the
    # height x width pixel grid.
    data = data.reshape(shape[::-1])
    np.savetxt(sys.stdout.buffer, data, fmt='%.8f')
    sys.exit()
| {
"content_hash": "f274653b1a3fd241357385aea5f86f8d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 30.901960784313726,
"alnum_prop": 0.6294416243654822,
"repo_name": "abrupt-climate/hyper-canny",
"id": "2f9cd55bb725b2fa99082217dc8142026a7db06c",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/convert_image.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15207"
},
{
"name": "C++",
"bytes": "2697379"
},
{
"name": "Makefile",
"bytes": "4604"
},
{
"name": "Meson",
"bytes": "4527"
},
{
"name": "Python",
"bytes": "10025"
},
{
"name": "Shell",
"bytes": "5280"
}
],
"symlink_target": ""
} |
__author__ = 'calvin'
"""
This example demonstrates the performance benefit of using list comprehension instead of for loop and append.
It also demonstrates how to use imported modules in your benchmarks as well as compare functions of the same group.
"""
try:
from builtins import range
except ImportError:
range = xrange
from math import sin #!
from pyperform import ComparisonBenchmark
@ComparisonBenchmark('Group1', validation=True, largs=(100,))
def list_append(n, *args, **kwargs):
    """Build [sin(1), ..., sin(n-1)] with an explicit loop and append.

    Serves as the slow baseline of the 'Group1' comparison.
    """
    values = []
    for k in range(1, n):
        values.append(sin(k))
    return values
@ComparisonBenchmark('Group1', validation=True, largs=(100,))
def list_comprehension(n, *args, **kwargs):
    """Build the same sin table as list_append, in one list comprehension."""
    return [sin(k) for k in range(1, n)]
# Print the comparison of all 'Group1' benchmarks; 'fs' presumably names the
# report output file -- confirm against the pyperform documentation.
ComparisonBenchmark.summarize('Group1', fs='report.txt')
| {
"content_hash": "cd133bab919bc565fcf6f238b36235ec",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 115,
"avg_line_length": 26.466666666666665,
"alnum_prop": 0.7052896725440806,
"repo_name": "lobocv/pyperform",
"id": "1e7aa9f44f52b05c6e2630e1df4402c5414b8f2f",
"size": "794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/list_comprehension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35318"
}
],
"symlink_target": ""
} |
from __future__ import division
def warn(*args, **kwargs):
    """No-op stand-in installed over warnings.warn to suppress all warnings."""
    return None


import warnings
warnings.warn = warn
import argparse
import os
import pandas as pd
import sys
from tqdm import tqdm
from classifier import get_text_pipeline, get_voting_classifier, DescriptionClassifier, ReadmeClassifier, NumericEnsembleClassifier, normalize, EnsembleAllNumeric, keep_useful_features
from constants import VALIDATION_DATA_PATH, ADDITIONAL_VALIDATION_DATA_PATH
from evaluation import drop_text_features
from load_data import process_data
from preprocess import ColumnSumFilter, ColumnStdFilter, PolynomialTransformer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import VotingClassifier, ExtraTreesClassifier
from sklearn.pipeline import Pipeline
from training import load_pickle, get_undersample_df, drop_defect_rows, JOBLIB_VOTING_PIPELINE_NAME, save_pickle
# NOTE(review): these module-level flags are not referenced within this chunk;
# verify their consumers before changing values.
N_BEST_FEATURES = 100
NUMERIZE_README = False
# NOTE(review): name is misspelled ("PCIKLES" -> "PICKLES"); kept as-is because
# other modules may import it by this name.
SAVE_PCIKLES = True
def main():
    """Parse CLI arguments and classify the given input file.

    Exits with status 1 when the input file does not exist.
    """
    parser = argparse.ArgumentParser(
        description='Classify GitHub repositories using training data or a pre-trained model. The predicted repositories are saved in predictions.txt.')
    parser.add_argument('-i', '--input-file', required=True,
                        help='Path to the input file that should be classified e.g. "data/example-input.txt"')
    parser.add_argument('-t', '--training-file',
                        help='Path to the training file that should be used to train the classifier e.g. "data/example-output.txt". ' +
                             'Repository URL and label should be separated by a comma or a whitespace character.')
    parser.add_argument('-p', '--processed', action='store_true',
                        help='Specifies that training file already contains fetched features.')
    parser.add_argument('-l', '--loops',
                        help='Specifies how many classifiers should be trained on the given training data. The classifier with the highest average score on the validation data is used for the prediction (default=1).')
    args = parser.parse_args()
    if os.path.isfile(args.input_file):
        classify(args)
    else:
        print "The input file doesn't exist"
        sys.exit(1)
def classify(args):
    """Fetch features for the input repositories and run prediction.

    With a training file: train fresh classifiers (fetching features first
    unless --processed says they are already present) and predict with the
    best of `--loops` trained models.  Without one: use the pretrained model.
    """
    input_path = args.input_file
    df_input = get_input_data(input_path)
    print "Fetching features for {} input samples".format(len(df_input))
    df_input = process_data(data_frame=df_input)
    if args.training_file:
        if args.processed:
            # training file already contains fetched features
            df_train = pd.read_csv(args.training_file)
        else:
            df_train = pd.read_csv(args.training_file, sep=' ', names=[
                "repository", "label"])
            print "Fetching features for {} training samples".format(len(df_train))
            df_train = process_data(data_frame=df_train)
        loops = args.loops or 1
        loops = int(loops)
        train_and_predict(df_train, df_input, loops)
    else:
        predict(df_input)
def split_features(df_origin):
    """Split features in numeric features, description, readme and label.

    Returns ``(numeric_frame, description, readme, label)``; label is None
    when the input has no 'label' column.  The caller's frame is not
    modified (a copy is taken first).
    """
    frame = drop_defect_rows(df_origin.copy())
    labels = frame.pop("label") if "label" in frame.columns else None
    frame = normalize(frame)
    description = frame.pop("description")
    readme = frame.pop("readme")
    return frame, description, readme, labels
def train_and_predict(df_training, df_input, loops):
    """Use a VotingClassifier on top of an ensemble of numeric classifiers
    and classifiers for the description and readme features"""
    print 30 * "="
    print "Fitting {} Voting Classifier(s)".format(loops)
    print 30 * "="
    df_training = normalize(df_training)
    # two held-out validation sets are used to pick the best of `loops` models
    val_df = normalize(pd.read_csv(VALIDATION_DATA_PATH))
    val_df = keep_useful_features(val_df, df_training.columns)
    y_val = val_df.pop("label")
    val_add_df = normalize(pd.read_csv(ADDITIONAL_VALIDATION_DATA_PATH))
    val_add_df = keep_useful_features(val_add_df, df_training.columns)
    y_val_add = val_add_df.pop("label")
    # drop the CSV index column that pandas wrote as "Unnamed: 0"
    _ = df_training.pop("Unnamed: 0")
    _ = val_df.pop("Unnamed: 0")
    _ = val_add_df.pop("Unnamed: 0")
    best_average_score = 0
    best_clf = None
    for i in tqdm(range(loops)):
        clf = VotingClassifier(estimators=[('description', DescriptionClassifier()),
                                           ('readme', ReadmeClassifier()),
                                           ('ensemble', NumericEnsembleClassifier())],
                               voting='soft')
        # each loop trains on a fresh undersampled (class-balanced) split
        df_train = get_undersample_df(df_training.copy())
        _ = df_train.pop("index")
        y_train = df_train.pop("label")
        clf.fit(df_train, y_train)
        val_score = clf.score(val_df, y_val)
        val_add_score = clf.score(val_add_df, y_val_add)
        # keep the classifier with the best mean accuracy over both sets
        if (val_score + val_add_score) / 2 > best_average_score:
            best_clf = clf
            best_average_score = (val_score + val_add_score) / 2
    print 74 * "="
    print "Using trained Voting Classifier with average accuracy on validation sets of {0:2f}".format((best_average_score))
    print 74 * "="
    predict(df_input, model_voting=best_clf)
def predict(df_input, model_voting=None):
    """Predict labels for *df_input* and write them to predictions.txt.

    Falls back to the pickled pretrained voting pipeline when no trained
    model is supplied.
    """
    if model_voting is None:
        print 35 * "="
        print 'Using pretrained Voting Classifier (67.74% on validation and 46.67% on additional validation data)'
        print 35 * "="
        model_voting = load_pickle(JOBLIB_VOTING_PIPELINE_NAME)
    repository = df_input["repository"]
    df_input = normalize(df_input)
    # keep text columns aside while the numeric columns are filtered ...
    descr = df_input["description"]
    readme = df_input["readme"]
    # ... to the feature set of the last (numeric ensemble) estimator; see the
    # estimator order in train_and_predict
    df_input = keep_useful_features(df_input, model_voting.estimators_[-1].useful_features)
    df_input["description"] = descr
    df_input["readme"] = readme
    predictions = model_voting.predict(df_input)
    df_input["label"] = predictions
    df_input["repository"] = repository
    # expand bare "owner/repo" names to full GitHub URLs for the output file
    df_input["repository"] = df_input.repository.apply(lambda x: "https://github.com/" + x)
    df_input[["repository", "label"]].to_csv("predictions.txt", sep=' ', header=False, index=False, encoding='utf-8')
    print "Saved predictions in predictions.txt"
    return
def get_input_data(input_path):
    """Read one repository name per line from *input_path* into a DataFrame
    with a single 'repository' column."""
    with open(input_path) as handle:
        repositories = handle.read().splitlines()
    return pd.Series(repositories).to_frame("repository")
def get_training_data(training_path):
    """Load space-separated (repository, label) rows into a DataFrame."""
    columns = ["repository", "label"]
    return pd.read_csv(training_path, sep=' ', names=columns)
if __name__ == '__main__':
main()
| {
"content_hash": "2fbfae5b524531709685ee57648c606d",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 217,
"avg_line_length": 41.50967741935484,
"alnum_prop": 0.6506061548026111,
"repo_name": "WGierke/git_better",
"id": "5f2e344272b42f30b6178d6dfd8b9fa389bfe038",
"size": "6434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1647"
},
{
"name": "Jupyter Notebook",
"bytes": "1189683"
},
{
"name": "Python",
"bytes": "76383"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pkg_resources
logger = logging.getLogger(__name__)
class Extension(object):
    """Pairs an entry-point name with the class/instance loaded from it."""

    def __init__(self, name, entry_point):
        self.name = name
        self.entry_point = entry_point
        self.cls = None   # filled in once the entry point is loaded
        self.obj = None   # filled in once the extension is instantiated

    def __repr__(self):
        return 'Extension({0})'.format(self.name)
class ExtensionManager(object):
    """Discover, instantiate, start and stop extensions registered under a
    setuptools entry-point namespace."""

    def __init__(self, namespace="ava.extension"):
        self.namespace = namespace
        self.extensions = []

    def load_extensions(self, invoke_on_load=True):
        """Load every entry point in ``self.namespace``.

        Each entry point's target is loaded into ``Extension.cls`` and, when
        *invoke_on_load* is true, instantiated (no-arg) into ``Extension.obj``.
        (Also removed a stray ``logger.debug("")`` that emitted a blank record.)
        """
        for it in pkg_resources.working_set.iter_entry_points(self.namespace, name=None):
            logger.debug("Loading extension: %s at module: %s", it.name, it.module_name)
            ext = Extension(it.name, it)
            ext.cls = it.load()
            if invoke_on_load:
                ext.obj = ext.cls()
            self.extensions.append(ext)
        # sort extensions by name so start/stop order is deterministic
        self.extensions.sort(key=lambda e: e.name)
        logger.debug("Loaded extensions: %r", self.extensions)

    def start_extensions(self, context=None):
        """Call ``start(context)`` on every extension object that defines it."""
        for ext in self.extensions:
            startfun = getattr(ext.obj, "start", None)
            if callable(startfun):
                startfun(context)

    def stop_extensions(self, context=None):
        """Call ``stop(context)`` on extensions in reverse start order.

        Shutdown stays best-effort -- one failing extension must not prevent
        the rest from stopping -- but failures are now logged instead of
        being silently swallowed.
        """
        for ext in reversed(self.extensions):
            stopfun = getattr(ext.obj, "stop", None)
            if callable(stopfun):
                try:
                    stopfun(context)
                except Exception:
                    logger.exception("Error stopping extension %s", ext.name)
__all__ = ['Extension', 'ExtensionManager']
| {
"content_hash": "962784340216287280876e9ac9917f52",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 89,
"avg_line_length": 31.07017543859649,
"alnum_prop": 0.5804630152456239,
"repo_name": "nickchen-mitac/fork",
"id": "53127495eab68b5cf78ebc61f6a41f3434871a91",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ava/ext/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10442"
},
{
"name": "HTML",
"bytes": "11410"
},
{
"name": "JavaScript",
"bytes": "25325"
},
{
"name": "Python",
"bytes": "445788"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
} |
"""
Support for IP Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.generic/
"""
import logging
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_AUTHENTICATION,
HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION)
from homeassistant.exceptions import TemplateError
from homeassistant.components.camera import (PLATFORM_SCHEMA, Camera)
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform.
CONF_LIMIT_REFETCH_TO_URL_CHANGE = 'limit_refetch_to_url_change'
CONF_STILL_IMAGE_URL = 'still_image_url'
DEFAULT_NAME = 'Generic Camera'
# Extend the shared camera platform schema with this platform's options:
# a templated still-image URL (required), HTTP basic/digest auth settings,
# and a flag to skip re-fetching while the rendered URL is unchanged.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_STILL_IMAGE_URL): cv.template,
    vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION):
        vol.In([HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]),
    vol.Optional(CONF_LIMIT_REFETCH_TO_URL_CHANGE, default=False): cv.boolean,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_USERNAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up one generic IP camera from its validated configuration."""
    camera = GenericCamera(hass, config)
    add_devices([camera])
# pylint: disable=too-many-instance-attributes
class GenericCamera(Camera):
    """A generic implementation of an IP camera.

    Fetches still images over HTTP from a template-rendered URL, with
    optional basic/digest auth and an optional re-fetch limiter that
    serves the cached image while the rendered URL is unchanged.
    """
    def __init__(self, hass, device_info):
        """Initialize a generic camera from its configuration entry."""
        super().__init__()
        self.hass = hass
        self._name = device_info.get(CONF_NAME)
        self._still_image_url = device_info[CONF_STILL_IMAGE_URL]
        self._still_image_url.hass = hass
        self._limit_refetch = device_info[CONF_LIMIT_REFETCH_TO_URL_CHANGE]
        username = device_info.get(CONF_USERNAME)
        password = device_info.get(CONF_PASSWORD)
        if username and password:
            use_digest = device_info[CONF_AUTHENTICATION] == HTTP_DIGEST_AUTHENTICATION
            auth_cls = HTTPDigestAuth if use_digest else HTTPBasicAuth
            self._auth = auth_cls(username, password)
        else:
            self._auth = None
        self._last_url = None
        self._last_image = None
    def camera_image(self):
        """Return a still image response from the camera."""
        try:
            url = self._still_image_url.render()
        except TemplateError as err:
            _LOGGER.error('Error parsing template %s: %s',
                          self._still_image_url, err)
            # Fall back to the last successfully fetched frame.
            return self._last_image
        if self._limit_refetch and url == self._last_url:
            return self._last_image
        try:
            response = requests.get(url, timeout=10, auth=self._auth)
        except requests.exceptions.RequestException as error:
            _LOGGER.error('Error getting camera image: %s', error)
            return None
        self._last_url = url
        self._last_image = response.content
        return self._last_image
    @property
    def name(self):
        """Return the name of this device."""
        return self._name
| {
"content_hash": "3835dfca9fcd088add85ef804504f57a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 34.10204081632653,
"alnum_prop": 0.6606822262118492,
"repo_name": "varunr047/homefile",
"id": "5d7488b8e688c419a4516405f5ee042650709012",
"size": "3342",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/camera/generic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1594834"
},
{
"name": "JavaScript",
"bytes": "1216"
},
{
"name": "Python",
"bytes": "3696131"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
"""Contains the logic for `aq show cpu`."""
from sqlalchemy.orm import contains_eager, undefer
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.aqdb.model import Cpu, Vendor
class CommandShowCpu(BrokerCommand):
    """Query the CPU models matching the `aq show cpu` filters."""
    def render(self, session, cpu, vendor, speed, **arguments):
        query = session.query(Cpu)
        if cpu:
            # Prefix match on the CPU model name.
            query = query.filter(Cpu.name.like(cpu + '%'))
        if vendor:
            dbvendor = Vendor.get_unique(session, vendor, compel=True)
            query = query.filter_by(vendor=dbvendor)
        if speed is not None:
            query = query.filter_by(speed=speed)
        # Join the vendor so it can be eagerly populated on each row.
        query = query.join(Vendor)
        query = query.options(undefer('comments'), contains_eager('vendor'))
        return query.order_by(Vendor.name, Cpu.name).all()
| {
"content_hash": "08cb8ea6e4e3d8a3ce6a23ef20817423",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6051980198019802,
"repo_name": "jrha/aquilon",
"id": "9b4014b781fb837cb0dc011cc709319e02e4bac9",
"size": "1511",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/commands/show_cpu.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import os
import datetime
import re
import json
import logging
from scrapy import Selector
from cameo.utility import Utility
from cameo.mod.yuwei.utility.scrapyUtility import scrapyUtility
"""
從 source_html 的 HTML 檔案解析資料
結果放置於 parsed_result 下
"""
class ParserForWEBACKERS:
    """Parses crawled WEBACKERS HTML pages (under source_html) into JSON
    result files written under parsed_result."""
    # Constructor
    def __init__(self):
        self.utility = Utility()
        # Maps a sub-command name to the ordered list of handler methods to run.
        self.dicSubCommandHandler = {
            "category":[self.parseCategoryPage],
            "project":[
                self.beforeParseProjectPage,
                self.parseIntroPage,
                self.parseSponsorPage,
                self.parseProgressPage,
                self.parseFaqPage,
                self.afterParseProjectPage
            ],
            "profile":[
                self.beforeParseProfilePage,
                self.parseProjPage,
                self.parseOrderPage,
                self.afterParseProfilePage
            ],
            "automode":[self.parseProjectAndProfilePageAutoMode]
        }
        self.strWebsiteDomain = u"https://www.webackers.com"
        self.lstStrCategoryName = [
            "acg", "art", "charity", "design", "music",
            "publication", "sport", "surprise", "technology", "video"
        ]
        self.SOURCE_HTML_BASE_FOLDER_PATH = u"cameo_res\\source_html"
        self.PARSED_RESULT_BASE_FOLDER_PATH = u"cameo_res\\parsed_result"
        self.dicParsedResultOfCategory = {} #category.json data
        self.dicParsedResultOfProject = {} #project.json data
        self.dicParsedResultOfUpdate = {} #update.json data
        self.dicParsedResultOfQanda = {} #qanda.json data
        self.dicParsedResultOfReward = {} #reward.json data
        self.dicParsedResultOfProfile = {} #profile.json data
#取得 parser 使用資訊
def getUseageMessage(self):
return (
"- WEBACKERS -\n"
"useage:\n"
"category - parse #_category.html then create project_url_list.txt\n"
"project category - parse project's html of given category, then create .json\n"
"profile category - parse profile's html of given category, then create .json\n"
"automode - parse project's and profile's html of all categories, then create .json\n"
)
#執行 parser
def runParser(self, lstSubcommand=None):
strSubcommand = lstSubcommand[0]
strArg1 = None
if len(lstSubcommand) == 2:
strArg1 = lstSubcommand[1]
for handler in self.dicSubCommandHandler[strSubcommand]:
handler(strArg1)
#tool method #####################################################################################
#將字串陣列合併之後再 strip
def stripTextArray(self, lstStrText=None):
strTextLine = u""
for strText in lstStrText:
if strText is not None:
strText = re.sub("\s", "", strText)
strTextLine = strTextLine + strText
return strTextLine.strip()
#解析 回饋組合的贊助狀態 字串 ex."1人待繳5人剩餘94人" return (1,5,94)
def parseStrRewardBacker(self, strRewardBacker=None):
(intPayed, intNotPayYet, intRemainQuta) = (0,0,0)
#pattern X人
m1 = re.match(u"^([0-9]*)人$", strRewardBacker)
if m1 is not None:
intPayed = int(m1.group(1))
return (intPayed, None, None)
#pattern X人待繳Y人剩餘Z人
m2 = re.match(u"^([0-9]*)人待繳([0-9]*)人剩餘([0-9]*)人$", strRewardBacker)
if m2 is not None:
intPayed = int(m2.group(1))
intNotPayYet = int(m2.group(2))
intRemainQuta = int(m2.group(3))
return (intPayed, intNotPayYet, intRemainQuta)
#pattern X人待繳Y人
m3 = re.match(u"^([0-9]*)人待繳([0-9]*)人$", strRewardBacker)
if m3 is not None:
intPayed = int(m3.group(1))
intNotPayYet = int(m3.group(2))
return (intPayed, intNotPayYet, None)
#pattern error
return None
#轉換預計出貨日期格式
def formatOriginStrRewardDeliveryDate(self, strOrigin=None):
strRet = None
if strOrigin is not None:
matchObj = re.search(u"^([0-9]*)年([0-9]*)月$", strOrigin)
if matchObj is not None:
strRet = "-".join([matchObj.group(1), matchObj.group(2), "01"])
return strRet
    #category #####################################################################################
    # Parse category.html
    def parseCategoryPage(self, uselessArg1=None):
        """Parse every crawled category.html page and write one
        category.json per category folder, containing the project data
        list and profile url list found on those pages.

        ``uselessArg1`` is ignored; it exists so all sub-command handlers
        share the same one-argument call shape.
        """
        strBrowseResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + u"\\WEBACKERS"
        strBrowseHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\WEBACKERS"
        lstStrCategoryHtmlFolderPath = self.utility.getSubFolderPathList(strBasedir=strBrowseHtmlFolderPath)
        for strCategoryHtmlFolderPath in lstStrCategoryHtmlFolderPath: # each category sub-folder
            strCategoryResultFolderPath = strBrowseResultFolderPath + u"\\%s"%re.match("^.*WEBACKERS\\\\([a-z]*)$", strCategoryHtmlFolderPath).group(1)
            if not os.path.exists(strCategoryResultFolderPath):
                os.mkdir(strCategoryResultFolderPath) #mkdir parsed_result/WEBACKERS/category/
            strCategoryJsonFilePath = strCategoryResultFolderPath + u"\\category.json"
            # reset dicParsedResultOfCategory for this category
            self.dicParsedResultOfCategory = {}
            self.dicParsedResultOfCategory["project_url_list"] = []
            self.dicParsedResultOfCategory["profile_url_list"] = []
            # parse every category.html page and collect the urls into the json data
            lstStrCategoryHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strCategoryHtmlFolderPath, strSuffixes=u"category.html")
            for strCategoryHtmlFilePath in lstStrCategoryHtmlFilePath: # each category.html page
                # record the crawl time (derived from the html file's ctime)
                strCrawlTime = self.utility.getCtimeOfFile(strFilePath=strCategoryHtmlFilePath)
                self.dicParsedResultOfCategory["strCrawlTime"] = strCrawlTime
                with open(strCategoryHtmlFilePath, "r") as categoryHtmlFile:
                    strPageSource = categoryHtmlFile.read()
                    root = Selector(text=strPageSource)
                    # start parsing
                    lstStrProjectUrl = root.css("li.cbp-item div.thumbnail > a:first-of-type::attr(href)").extract()
                    lstStrProfileUrl = root.css("li.cbp-item div.thumbnail a.pull-left::attr(href)").extract()
                    # collect urls
                    for strProjectUrl in lstStrProjectUrl:
                        # project data shown on this category.html page
                        dicProjectData = {}
                        #strUrl
                        strFullProjectUrl = self.strWebsiteDomain + strProjectUrl
                        dicProjectData["strUrl"] = strFullProjectUrl
                        #strDescription and #strStatus
                        strDescription = None
                        strStatus = None
                        elesDivItemWrapper = root.css("div.cbp-item-wrapper")
                        # NOTE(review): scans every item wrapper per project url
                        # (quadratic in page size) to find the matching wrapper.
                        for eleDivItemWrapper in elesDivItemWrapper:
                            if len(eleDivItemWrapper.css("div.thumbnail a[href='%s']"%strProjectUrl)) != 0:
                                strDescription = eleDivItemWrapper.css("div.thumbnail div.caption_view p::text").extract_first()
                                lstStrStatus = eleDivItemWrapper.css("div.case_msg_i li.timeitem::text").extract()
                                strStatus = self.stripTextArray(lstStrText=lstStrStatus)
                        dicProjectData["strDescription"] = strDescription
                        dicProjectData["strStatus"] = strStatus
                        # append the project data
                        self.dicParsedResultOfCategory["project_url_list"].append(dicProjectData)
                    for strProfileUrl in lstStrProfileUrl:
                        strFullProfileUrl = self.strWebsiteDomain + strProfileUrl
                        self.dicParsedResultOfCategory["profile_url_list"].append(strFullProfileUrl)
            self.utility.writeObjectToJsonFile(self.dicParsedResultOfCategory, strCategoryJsonFilePath)
#project #####################################################################################
#解析 project page(s) 之前
def beforeParseProjectPage(self, strCategoryName=None):
self.dicParsedResultOfProject = {} #project.json 資料
self.dicParsedResultOfUpdate = {} #update.json 資料
self.dicParsedResultOfQanda = {} #qanda.json 資料
self.dicParsedResultOfReward = {} #reward.json 資料
strProjectsResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
if not os.path.exists(strProjectsResultFolderPath):
#mkdir parsed_result/WEBACKERS/category/projects/
os.mkdir(strProjectsResultFolderPath)
#解析 project page(s) 之後
def afterParseProjectPage(self, strCategoryName=None):
strProjectsResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
#將 parse 結果寫入 json 檔案
self.utility.writeObjectToJsonFile(self.dicParsedResultOfProject, strProjectsResultFolderPath + u"\\project.json")
self.utility.writeObjectToJsonFile(self.dicParsedResultOfReward, strProjectsResultFolderPath + u"\\reward.json")
self.utility.writeObjectToJsonFile(self.dicParsedResultOfUpdate, strProjectsResultFolderPath + u"\\update.json")
self.utility.writeObjectToJsonFile(self.dicParsedResultOfQanda, strProjectsResultFolderPath + u"\\qanda.json")
#取得 在 category 頁面上的 project 資料,若無該 project return null
def findDicProjectDataOnCategoryJson(self, dicCategoryData=None, strProjUrl=None):
dicCurrentProjectData = None
for dicProjectData in dicCategoryData["project_url_list"]:
if dicProjectData["strUrl"] == strProjUrl:
dicCurrentProjectData = dicProjectData
break
return dicCurrentProjectData
    # Parse intro.html
    def parseIntroPage(self, strCategoryName=None):
        """Parse every <projId>_intro.html under the category's projects
        folder, filling dicParsedResultOfProject (project.json data) and
        dicParsedResultOfReward (reward.json data), keyed by project url."""
        # fetch the project data recorded from the category page
        strCategoryJsonFilePath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\category.json"%strCategoryName)
        dicCategoryData = self.utility.readObjectFromJsonFile(strJsonFilePath=strCategoryJsonFilePath)
        #load html
        strProjectsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
        lstStrIntroHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProjectsHtmlFolderPath, strSuffixes="_intro.html")
        for strProjectIntroHtmlFilePath in lstStrIntroHtmlFilePath:
            logging.info("parsing %s"%strProjectIntroHtmlFilePath)
            with open(strProjectIntroHtmlFilePath, "r") as projectIntroHtmlFile:
                strProjHtmlFileName = os.path.basename(projectIntroHtmlFile.name)
                # derive the project url from the html file name
                strProjId = re.search("^(.*)_intro.html$", strProjHtmlFileName).group(1)
                strProjUrl = u"https://www.webackers.com/Proposal/Display/" + strProjId
                # start parsing
                strPageSource = projectIntroHtmlFile.read()
                root = Selector(text=strPageSource)
                # skip this project when category.json holds no data for it
                dicCurrentProjectData = self.findDicProjectDataOnCategoryJson(dicCategoryData=dicCategoryData, strProjUrl=strProjUrl)
                if not dicCurrentProjectData:
                    continue
                # - build project.json data -
                if strProjUrl not in self.dicParsedResultOfProject:
                    self.dicParsedResultOfProject[strProjUrl] = {}
                #strSource
                self.dicParsedResultOfProject[strProjUrl]["strSource"] = \
                    u"WEBACKERS"
                #strUrl
                self.dicParsedResultOfProject[strProjUrl]["strUrl"] = \
                    strProjUrl
                #strCrawlTime
                strCrawlTime = dicCategoryData["strCrawlTime"]
                self.dicParsedResultOfProject[strProjUrl]["strCrawlTime"] = strCrawlTime
                #strProjectName
                self.dicParsedResultOfProject[strProjUrl]["strProjectName"] = \
                    root.css("a[href*='%s'] span.case_title::text"%strProjId).extract_first().strip()
                #strLocation
                self.dicParsedResultOfProject[strProjUrl]["strLocation"] = u"Taiwan"
                #strCity
                self.dicParsedResultOfProject[strProjUrl]["strCity"] = u"Taiwan"
                #strCountry
                self.dicParsedResultOfProject[strProjUrl]["strCountry"] = u"TW"
                #strContinent
                self.dicParsedResultOfProject[strProjUrl]["strContinent"] = u"AS"
                #strDescription
                strDescription = dicCurrentProjectData["strDescription"]
                self.dicParsedResultOfProject[strProjUrl]["strDescription"] = strDescription
                #strIntroduction
                strIntroduction = u""
                for strIntroductionText in root.css("div.description *::text").extract():
                    strIntroduction = strIntroduction + strIntroductionText
                self.dicParsedResultOfProject[strProjUrl]["strIntroduction"] = strIntroduction
                #intStatus
                # 0 = still running, 1 = funded (已完成), 2 = ended (已結束)
                dicMappingStatus = {
                    u"已完成":1,
                    u"已結束":2,
                }
                intStatus = 0
                strStatus = dicCurrentProjectData["strStatus"]
                if strStatus in dicMappingStatus:
                    intStatus = dicMappingStatus[strStatus]
                else:
                    intStatus = 0
                self.dicParsedResultOfProject[strProjUrl]["intStatus"] = intStatus
                #strCreator
                strCreator = root.css("aside.col-md-3 article:nth-of-type(5) h3 a::text").extract_first().strip()
                self.dicParsedResultOfProject[strProjUrl]["strCreator"] = strCreator
                #strCreatorUrl
                strCreatorUrl = root.css("aside.col-md-3 article:nth-of-type(5) h3 a::attr('href')").extract_first().strip()
                self.dicParsedResultOfProject[strProjUrl]["strCreatorUrl"] = self.strWebsiteDomain + strCreatorUrl
                #strCategory and strSubCategory
                strCategory = root.css("a[href*='category='] span.case_title::text").extract_first().strip()
                self.dicParsedResultOfProject[strProjUrl]["strCategory"] = strCategory
                self.dicParsedResultOfProject[strProjUrl]["strSubCategory"] = strCategory
                #intFundTarget
                strFundTarget = root.css("span.money_target::text").extract_first().strip()
                intFundTarget = int(re.sub("[^0-9]", "", strFundTarget))
                self.dicParsedResultOfProject[strProjUrl]["intFundTarget"] = intFundTarget
                #intRaisedMoney
                strRaisedMoney = root.css("span.money_now::text").extract_first().strip()
                intRaisedMoney = int(re.sub("[^0-9]", "", strRaisedMoney))
                self.dicParsedResultOfProject[strProjUrl]["intRaisedMoney"] = intRaisedMoney
                #fFundProgress
                # NOTE(review): despite the f prefix, the stored value is truncated to int
                fFundProgress = (float(intRaisedMoney) / float(intFundTarget)) * 100
                self.dicParsedResultOfProject[strProjUrl]["fFundProgress"] = int(fFundProgress)
                #strCurrency
                self.dicParsedResultOfProject[strProjUrl]["strCurrency"] = u"NTD"
                #intRemainDays
                intRemainDays = self.utility.translateTimeleftTextToPureNum(strTimeleftText=strStatus, strVer="WEBACKERS")
                self.dicParsedResultOfProject[strProjUrl]["intRemainDays"] = intRemainDays
                #strEndDate
                strEndDate = None
                if intRemainDays > 0: # still running: end date = crawl date + remaining days
                    strCrawlTime = dicCategoryData["strCrawlTime"]
                    dtCrawlTime = datetime.datetime.strptime(strCrawlTime, "%Y-%m-%d")
                    dtEndDate = dtCrawlTime + datetime.timedelta(days=intRemainDays)
                    strEndDate = dtEndDate.strftime("%Y-%m-%d")
                else:# funded or ended: read the exact end date from the page
                    strEndDate = root.css("aside.col-md-3 article:nth-of-type(4) div.panel-body span:nth-of-type(2)::text").extract_first().strip()
                    dtEndDate = datetime.datetime.strptime(strEndDate, "%Y/%m/%d %H:%M")
                    strEndDate = dtEndDate.strftime("%Y-%m-%d")
                self.dicParsedResultOfProject[strProjUrl]["strEndDate"] = strEndDate
                #strStartDate: not obtainable from the page
                self.dicParsedResultOfProject[strProjUrl]["strStartDate"] = None
                #intUpdate
                intUpdate = int(root.css("ul.nav-tabs li a[href*='tab=progress'] div.badge::text").extract_first().strip())
                self.dicParsedResultOfProject[strProjUrl]["intUpdate"] = intUpdate
                #intBacker
                intBacker = int(root.css("ul.nav-tabs li a[href*='tab=sponsor'] div.badge::text").extract_first().strip())
                self.dicParsedResultOfProject[strProjUrl]["intBacker"] = intBacker
                #intComment
                intComment = int(root.css("ul.nav-tabs li a[href*='tab=faq'] div.badge::text").extract_first().strip())
                self.dicParsedResultOfProject[strProjUrl]["intComment"] = intComment
                #intFbLike
                intFbLike = int(root.css("span.fbBtn span.fb_share_count::text").extract_first().strip())
                self.dicParsedResultOfProject[strProjUrl]["intFbLike"] = intFbLike
                #intVideoCount
                intVideoCount = len(root.css("div.description iframe[src*='youtube'], div.flex-video"))
                self.dicParsedResultOfProject[strProjUrl]["intVideoCount"] = intVideoCount
                #intImageCount
                intImageCount = len(root.css("div.description img[src*='image']"))
                self.dicParsedResultOfProject[strProjUrl]["intImageCount"] = intImageCount
                #isPMSelect: not obtainable from the page
                self.dicParsedResultOfProject[strProjUrl]["isPMSelect"] = None
                #
                # - build reward.json data -
                lstDicRewardData = []
                elesReward = root.css("aside article div.panel")
                for eleReward in elesReward:
                    if len(eleReward.css("div.panel-case")) != 0:
                        dicRewardData = {}
                        #strUrl
                        dicRewardData["strUrl"] = strProjUrl
                        #strRewardContent
                        lstStrRewardContent = eleReward.css("div.panel-body div.fa-black_h.padding_space.txt_line_fix::text").extract()
                        strRewardContent = self.stripTextArray(lstStrText=lstStrRewardContent)
                        dicRewardData["strRewardContent"] = strRewardContent
                        #intRewardMoney
                        strRewardMoney = eleReward.css("div.panel-case div.pull-left span.font_m1::text").extract_first().strip()
                        intRewardMoney = int(re.sub("[^0-9]", "", strRewardMoney))
                        dicRewardData["intRewardMoney"] = intRewardMoney
                        #intRewardBacker
                        lstStrRewardBacker = eleReward.css("div.panel-case div.pull-right::text").extract()
                        strRewardBacker = self.stripTextArray(lstStrText=lstStrRewardBacker) # e.g. "1人待繳5人剩餘94人"
                        (intPayed, intNotPayYet, intRemainQuta) = self.parseStrRewardBacker(strRewardBacker=strRewardBacker)
                        intRewardBacker = intPayed
                        dicRewardData["intRewardBacker"] = intRewardBacker
                        #intRewardLimit
                        intRewardLimit = 0
                        if intRemainQuta is not None:
                            intRewardLimit = sum((intPayed, intNotPayYet, intRemainQuta))
                        dicRewardData["intRewardLimit"] = intRewardLimit
                        #strRewardShipTo and strRewardDeliveryDate
                        lstStrDeliveryDateAndShipTo = eleReward.css("div.panel-body div.fa-black_h.bg_gray_h::text").extract()
                        strDeliveryDateAndShipTo = self.stripTextArray(lstStrText=lstStrDeliveryDateAndShipTo) #"寄送條件:.*預計送達:.*"
                        mDeliveryDateAndShipTo = re.match(u"^寄送條件:(.*)預計送達:(.*)$", strDeliveryDateAndShipTo)
                        (strRewardDeliveryDate, strRewardShipTo) = (None, None)
                        if mDeliveryDateAndShipTo is not None:
                            strRewardShipTo = mDeliveryDateAndShipTo.group(1)
                            strRewardDeliveryDate = mDeliveryDateAndShipTo.group(2)
                        dicRewardData["strRewardShipTo"] = strRewardShipTo
                        strRewardDeliveryDate = self.formatOriginStrRewardDeliveryDate(strOrigin=strRewardDeliveryDate) # convert format 2016年04月 -> 2016-04-01
                        dicRewardData["strRewardDeliveryDate"] = strRewardDeliveryDate
                        #intRewardRetailPrice
                        dicRewardData["intRewardRetailPrice"] = scrapyUtility.getRetailPrice(strRewardContent, [u"原價", u"市價", u"價", u"零售"], intRewardMoney=intRewardMoney)
                        # append the reward data
                        lstDicRewardData.append(dicRewardData)
                self.dicParsedResultOfReward[strProjUrl] = lstDicRewardData
    # Parse sponsor.html
    def parseSponsorPage(self, strCategoryName=None):
        """Parse every <projId>_sponsor.html of the category and add the
        backer name list (lstStrBacker) to that project's entry in
        dicParsedResultOfProject."""
        # fetch the project data recorded from the category page
        strCategoryJsonFilePath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\category.json"%strCategoryName)
        dicCategoryData = self.utility.readObjectFromJsonFile(strJsonFilePath=strCategoryJsonFilePath)
        #load html
        strProjectsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
        lstStrSponsorHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProjectsHtmlFolderPath, strSuffixes="_sponsor.html")
        for strProjectSponsorHtmlFilePath in lstStrSponsorHtmlFilePath:
            logging.info("parsing %s"%strProjectSponsorHtmlFilePath)
            with open(strProjectSponsorHtmlFilePath, "r") as projectSponsorHtmlFile:
                strProjHtmlFileName = os.path.basename(projectSponsorHtmlFile.name)
                # derive the project url from the html file name
                strProjId = re.search("^(.*)_sponsor.html$", strProjHtmlFileName).group(1)
                strProjUrl = u"https://www.webackers.com/Proposal/Display/" + strProjId
                # start parsing
                strPageSource = projectSponsorHtmlFile.read()
                root = Selector(text=strPageSource)
                # skip this project when category.json holds no data for it
                dicCurrentProjectData = self.findDicProjectDataOnCategoryJson(dicCategoryData=dicCategoryData, strProjUrl=strProjUrl)
                if not dicCurrentProjectData:
                    continue
                # - extend project.json data (add lstStrBacker) -
                if strProjUrl not in self.dicParsedResultOfProject:
                    self.dicParsedResultOfProject[strProjUrl] = {}
                #lstStrBacker
                lstStrBacker = root.css("div#sponsor_panel p a.fa-black_h::text").extract()
                self.dicParsedResultOfProject[strProjUrl]["lstStrBacker"] = lstStrBacker
    # Parse progress.html
    def parseProgressPage(self, strCategoryName=None):
        """Parse every <projId>_progress.html of the category, filling
        dicParsedResultOfUpdate (update.json data) keyed by project url."""
        # fetch the project data recorded from the category page
        strCategoryJsonFilePath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\category.json"%strCategoryName)
        dicCategoryData = self.utility.readObjectFromJsonFile(strJsonFilePath=strCategoryJsonFilePath)
        #load html
        strProjectsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
        lstStrProgressHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProjectsHtmlFolderPath, strSuffixes="_progress.html")
        for strProjectProgressHtmlFilePath in lstStrProgressHtmlFilePath:
            logging.info("parsing %s"%strProjectProgressHtmlFilePath)
            with open(strProjectProgressHtmlFilePath, "r") as projectProgressHtmlFile:
                strProjHtmlFileName = os.path.basename(projectProgressHtmlFile.name)
                # derive the project url from the html file name
                strProjId = re.search("^(.*)_progress.html$", strProjHtmlFileName).group(1)
                strProjUrl = u"https://www.webackers.com/Proposal/Display/" + strProjId
                # start parsing
                strPageSource = projectProgressHtmlFile.read()
                root = Selector(text=strPageSource)
                # skip this project when category.json holds no data for it
                dicCurrentProjectData = self.findDicProjectDataOnCategoryJson(dicCategoryData=dicCategoryData, strProjUrl=strProjUrl)
                if not dicCurrentProjectData:
                    continue
                # - build update.json data -
                lstDicUpdateData = []
                elesUpdate = root.css("div.active div.panel-group")
                for eleUpdate in elesUpdate:
                    dicUpdateData = {}
                    #strUrl
                    dicUpdateData["strUrl"] = strProjUrl
                    #strUpdateTitle
                    strUpdateTitle = eleUpdate.css("div.panel-heading div.pull-left h4::text").extract_first().strip()
                    dicUpdateData["strUpdateTitle"] = strUpdateTitle
                    #strUpdateContent
                    lstStrUpdateContentText = eleUpdate.css("div.panel-body div.content_area *::text").extract()
                    strUpdateContent = self.stripTextArray(lstStrText=lstStrUpdateContentText)
                    dicUpdateData["strUpdateContent"] = strUpdateContent
                    #strUpdateDate
                    strUpdateDate = eleUpdate.css("div.panel-heading div.pull-right span:nth-of-type(2)::text").extract_first().strip()
                    strUpdateDate = re.sub("/", "-", strUpdateDate)
                    dicUpdateData["strUpdateDate"] = strUpdateDate
                    # append the update data
                    lstDicUpdateData.append(dicUpdateData)
                self.dicParsedResultOfUpdate[strProjUrl] = lstDicUpdateData
    # Parse faq.html
    def parseFaqPage(self, strCategoryName=None):
        """Parse every <projId>_faq.html of the category, filling
        dicParsedResultOfQanda (qanda.json data) keyed by project url."""
        # fetch the project data recorded from the category page
        strCategoryJsonFilePath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\category.json"%strCategoryName)
        dicCategoryData = self.utility.readObjectFromJsonFile(strJsonFilePath=strCategoryJsonFilePath)
        #load html
        strProjectsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\projects"%strCategoryName)
        lstStrFaqHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProjectsHtmlFolderPath, strSuffixes="_faq.html")
        for strProjectFaqHtmlFilePath in lstStrFaqHtmlFilePath:
            logging.info("parsing %s"%strProjectFaqHtmlFilePath)
            with open(strProjectFaqHtmlFilePath, "r") as projectFaqHtmlFile:
                strProjHtmlFileName = os.path.basename(projectFaqHtmlFile.name)
                # derive the project url from the html file name
                strProjId = re.search("^(.*)_faq.html$", strProjHtmlFileName).group(1)
                strProjUrl = u"https://www.webackers.com/Proposal/Display/" + strProjId
                # start parsing
                strPageSource = projectFaqHtmlFile.read()
                root = Selector(text=strPageSource)
                # skip this project when category.json holds no data for it
                dicCurrentProjectData = self.findDicProjectDataOnCategoryJson(dicCategoryData=dicCategoryData, strProjUrl=strProjUrl)
                if not dicCurrentProjectData:
                    continue
                # - build qanda.json data -
                lstDicQandaData = []
                elesQanda = root.css("div.panel-group div.panel")
                for eleQanda in elesQanda:
                    dicQandaData = {}
                    #strUrl
                    dicQandaData["strUrl"] = strProjUrl
                    #strQnaQuestion
                    lstStrQnaQuestionText = eleQanda.css("div.panel-heading a::text").extract()
                    strQnaQuestion = self.stripTextArray(lstStrText=lstStrQnaQuestionText)
                    dicQandaData["strQnaQuestion"] = strQnaQuestion
                    #strQnaAnswer
                    lstStrQnaAnswerText = eleQanda.css("div.panel-collapse div.panel-body div.reply::text").extract()
                    strQnaAnswer = self.stripTextArray(lstStrText=lstStrQnaAnswerText)
                    dicQandaData["strQnaAnswer"] = strQnaAnswer
                    #strQnaDate: not obtainable from the page
                    dicQandaData["strQnaDate"] = None
                    # append the qanda data
                    lstDicQandaData.append(dicQandaData)
                self.dicParsedResultOfQanda[strProjUrl] = lstDicQandaData
#profile #####################################################################################
#解析 profile page(s) 之前
def beforeParseProfilePage(self, strCategoryName=None):
self.dicParsedResultOfProfile = {} #profile.json 資料
strProfilesResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\profiles"%strCategoryName)
if not os.path.exists(strProfilesResultFolderPath):
#mkdir parsed_result/WEBACKERS/category/profiles
os.mkdir(strProfilesResultFolderPath)
#解析 profile page(s) 之後
def afterParseProfilePage(self, strCategoryName=None):
strProfilesResultFolderPath = self.PARSED_RESULT_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\profiles"%strCategoryName)
#將 parse 結果寫入 json 檔案
self.utility.writeObjectToJsonFile(self.dicParsedResultOfProfile, strProfilesResultFolderPath + u"\\profile.json")
    # Parse proj.html
    def parseProjPage(self, strCategoryName=None):
        """Parse every <profId>_proj.html of the category, filling the
        creator-side fields of dicParsedResultOfProfile (profile.json
        data) keyed by profile url."""
        strProfilesHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\profiles"%strCategoryName)
        lstStrProjHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProfilesHtmlFolderPath, strSuffixes="_proj.html")
        for strProfileProjHtmlFilePath in lstStrProjHtmlFilePath:
            logging.info("parsing %s"%strProfileProjHtmlFilePath)
            with open(strProfileProjHtmlFilePath, "r") as profileProjHtmlFile:
                strProfHtmlFileName = os.path.basename(profileProjHtmlFile.name)
                # derive the profile url from the html file name
                strProfId = re.search("^(.*)_proj.html$", strProfHtmlFileName).group(1)
                strProfUrl = u"https://www.webackers.com/Proposal/CreatorProfile?proposalId=" + strProfId
                if strProfUrl not in self.dicParsedResultOfProfile:
                    self.dicParsedResultOfProfile[strProfUrl] = {}
                # start parsing
                strPageSource = profileProjHtmlFile.read()
                root = Selector(text=strPageSource)
                #strUrl
                self.dicParsedResultOfProfile[strProfUrl]["strUrl"] = strProfUrl
                #strName
                lstStrNameText = root.css("h4.fa-black::text").extract()
                strName = self.stripTextArray(lstStrText=lstStrNameText)
                self.dicParsedResultOfProfile[strProfUrl]["strName"] = strName
                #strIdentityName: same as strName
                self.dicParsedResultOfProfile[strProfUrl]["strIdentityName"] = strName
                #strDescription
                lstStrDescription = root.css("p small.fa-gray::text").extract()
                strDescription = self.stripTextArray(lstStrText=lstStrDescription)
                self.dicParsedResultOfProfile[strProfUrl]["strDescription"] = strDescription
                #strLocation
                self.dicParsedResultOfProfile[strProfUrl]["strLocation"] = u"Taiwan"
                #strCity
                self.dicParsedResultOfProfile[strProfUrl]["strCity"] = u"Taiwan"
                #strCountry
                self.dicParsedResultOfProfile[strProfUrl]["strCountry"] = u"TW"
                #strContinent
                self.dicParsedResultOfProfile[strProfUrl]["strContinent"] = u"AS"
                #intCreatedCount
                intCreatedCount = int(root.css("ul.nav-tabs li a[href*='tab=project'] div.badge::text").extract_first())
                self.dicParsedResultOfProfile[strProfUrl]["intCreatedCount"] = intCreatedCount
                #intBackedCount
                intBackedCount = int(root.css("ul.nav-tabs li a[href*='tab=order'] div.badge::text").extract_first())
                self.dicParsedResultOfProfile[strProfUrl]["intBackedCount"] = intBackedCount
                #isCreator
                isCreator = (True if intCreatedCount > 0 else False)
                self.dicParsedResultOfProfile[strProfUrl]["isCreator"] = isCreator
                #isBacker
                isBacker = (True if intBackedCount > 0 else False)
                self.dicParsedResultOfProfile[strProfUrl]["isBacker"] = isBacker
                #lstStrCreatedProject and lstStrCreatedProjectUrl
                elesCreatedProject = root.css("div.panel-body div.col-sm-6.col-md-4.col-lg-4.col-xs-12")
                lstStrCreatedProject = []
                lstStrCreatedProjectUrl = []
                lstStrCreatedProjectStatus = []
                for eleCreatedProject in elesCreatedProject:
                    lstStrCreatedProjectText = eleCreatedProject.css("div.thumbnail div.caption h4::text").extract()
                    strCreatedProject = self.stripTextArray(lstStrText=lstStrCreatedProjectText)
                    lstStrCreatedProject.append(strCreatedProject)
                    strCreatedProjectUrl = (self.strWebsiteDomain +
                        eleCreatedProject.css("div.thumbnail a::attr('href')").extract_first().strip())
                    lstStrCreatedProjectUrl.append(strCreatedProjectUrl)
                    # record each project's status to compute intLiveProject, intSuccessProject, intFailedProject
                    lstStrCreatedProjectStatusText = eleCreatedProject.css("div.about_i li.timeitem::text").extract()
                    strCreatedProjectStatus = self.stripTextArray(lstStrText=lstStrCreatedProjectStatusText)
                    lstStrCreatedProjectStatus.append(strCreatedProjectStatus)
                self.dicParsedResultOfProfile[strProfUrl]["lstStrCreatedProject"] = lstStrCreatedProject
                self.dicParsedResultOfProfile[strProfUrl]["lstStrCreatedProjectUrl"] = lstStrCreatedProjectUrl
                #intSuccessProject
                intSuccessProject = lstStrCreatedProjectStatus.count(u"已完成")
                self.dicParsedResultOfProfile[strProfUrl]["intSuccessProject"] = intSuccessProject
                #intFailedProject
                intFailedProject = lstStrCreatedProjectStatus.count(u"已結束")
                self.dicParsedResultOfProfile[strProfUrl]["intFailedProject"] = intFailedProject
                #intLiveProject
                intLiveProject = len(lstStrCreatedProjectStatus) - intSuccessProject - intFailedProject
                self.dicParsedResultOfProfile[strProfUrl]["intLiveProject"] = intLiveProject
                #lstStrSocialNetwork: not obtainable from the page
                self.dicParsedResultOfProfile[strProfUrl]["lstStrSocialNetwork"] = None
                #intFbFriend: not obtainable from the page
                self.dicParsedResultOfProfile[strProfUrl]["intFbFriend"] = None
                #strLastLoginDate: not obtainable from the page
                self.dicParsedResultOfProfile[strProfUrl]["strLastLoginDate"] = None
# Parse order.html (the "backed projects" tab of a creator profile)
def parseOrderPage(self, strCategoryName=None):
    """Parse every downloaded ``*_order.html`` profile page of a category.

    For each file, extracts the list of projects the profile owner has
    backed (names and absolute URLs) and stores them in
    ``self.dicParsedResultOfProfile[strProfUrl]``.

    :param strCategoryName: category sub-folder under the WEBACKERS html dump.
    """
    strProfilesHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + (u"\\WEBACKERS\\%s\\profiles"%strCategoryName)
    lstStrOrderHtmlFilePath = self.utility.getFilePathListWithSuffixes(strBasedir=strProfilesHtmlFolderPath, strSuffixes="_order.html")
    for strProfileOrderHtmlFilePath in lstStrOrderHtmlFilePath:
        logging.info("parsing %s"%strProfileOrderHtmlFilePath)
        with open(strProfileOrderHtmlFilePath, "r") as profileOrderHtmlFile:
            strProfHtmlFileName = os.path.basename(profileOrderHtmlFile.name)
            # derive the profile URL from the html file name ("<id>_order.html")
            strProfId = re.search("^(.*)_order.html$", strProfHtmlFileName).group(1)
            strProfUrl = u"https://www.webackers.com/Proposal/CreatorProfile?proposalId=" + strProfId
            if strProfUrl not in self.dicParsedResultOfProfile:
                self.dicParsedResultOfProfile[strProfUrl] = {}
            # start parsing the page source
            strPageSource = profileOrderHtmlFile.read()
            root = Selector(text=strPageSource)
            #lstStrBackedProject and lstStrBackedProjectUrl
            elesBackedProject = root.css("div#history-panel div.col-sm-6.col-md-4.col-lg-4.col-xs-12")
            lstStrBackedProject = []
            lstStrBackedProjectUrl = []
            for eleBackedProject in elesBackedProject:
                strBackedProject = eleBackedProject.css("div.thumbnail div.caption h4::text").extract_first().strip()
                lstStrBackedProject.append(strBackedProject)
                # relative href -> absolute URL on the site domain
                strBackedProjectUrl = (self.strWebsiteDomain +
                                       eleBackedProject.css("div.thumbnail > a::attr('href')").extract_first().strip())
                lstStrBackedProjectUrl.append(strBackedProjectUrl)
            self.dicParsedResultOfProfile[strProfUrl]["lstStrBackedProject"] = lstStrBackedProject
            self.dicParsedResultOfProfile[strProfUrl]["lstStrBackedProjectUrl"] = lstStrBackedProjectUrl
# Parse sub.html -- currently of no use, intentionally left unimplemented.
def parseSubPage(self, strCategoryName=None):
    """No-op placeholder: sub.html pages carry no data we need yet."""
    pass
#automode #####################################################################################
# Fully automatic mode: parse the project pages and the profile pages of every category.
def parseProjectAndProfilePageAutoMode(self, uselessArg1=None):
    """Run the whole parsing pipeline for each category in ``self.lstStrCategoryName``.

    For every category: first the project pages (intro / sponsor / progress /
    faq, bracketed by before/after hooks), then the profile pages (proj /
    order, bracketed the same way).

    :param uselessArg1: ignored; kept only for the auto-mode caller's calling
        convention.
    """
    for strCategoryName in self.lstStrCategoryName:
        #parse project page
        self.beforeParseProjectPage(strCategoryName)
        self.parseIntroPage(strCategoryName)
        self.parseSponsorPage(strCategoryName)
        self.parseProgressPage(strCategoryName)
        self.parseFaqPage(strCategoryName)
        self.afterParseProjectPage(strCategoryName)
        #parse profile page
        self.beforeParseProfilePage(strCategoryName)
        self.parseProjPage(strCategoryName)
        self.parseOrderPage(strCategoryName)
self.afterParseProfilePage(strCategoryName) | {
"content_hash": "253044161eed65a34d2d20d3c4c0adce",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 167,
"avg_line_length": 60.198412698412696,
"alnum_prop": 0.6298483849703362,
"repo_name": "muchu1983/104_cameo",
"id": "bd462ca8b571867c4068a7efa3459a78debfacbe",
"size": "38893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cameo/parserForWEBACKERS.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4028"
},
{
"name": "HTML",
"bytes": "885957"
},
{
"name": "Python",
"bytes": "738810"
}
],
"symlink_target": ""
} |
from .models import User as UserProjection
from cq.handlers import register_handler
@register_handler('User', 'Registered')
def handle_registered_user(event, replaying_events):
    """Project a ``User/Registered`` event into the read-side UserProjection table."""
    payload = event.data
    projection_fields = {
        'id': event.aggregate_id,
        'email': payload['email'],
        'registered_at': event.ts,
        'role': payload['role'],
    }
    UserProjection.objects.create(**projection_fields)
    # send email with activation token etc
| {
"content_hash": "822d39f36dab78afd0e6ea1b96602c40",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 30.307692307692307,
"alnum_prop": 0.6954314720812182,
"repo_name": "lukaszb/cq",
"id": "2e2aba1f0e68cccfa185f5b5eca34aa68f2e97dc",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "examples/djangoapp/accounts/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1693"
},
{
"name": "Python",
"bytes": "43837"
}
],
"symlink_target": ""
} |
from django.contrib.auth import authenticate, logout
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import transaction
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import render
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
import json
from .models import Todo
# Create your views here.
def login(request):
    """Authenticate a user from a POSTed username/password pair.

    GET renders the login form; POST attempts authentication and, on
    success, redirects to the todo index. Already-authenticated users are
    redirected immediately.
    """
    # Imported locally because this view is itself named ``login`` and would
    # otherwise shadow django.contrib.auth.login at module level.
    from django.contrib.auth import login
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('todo:index'))
    user = None
    if request.method == 'POST':
        # .get() instead of [] so a malformed POST (missing fields) yields a
        # failed login rather than a MultiValueDictKeyError / HTTP 500.
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            login(request, user)
            return HttpResponseRedirect(reverse('todo:index'))
    # ``user`` is None here on GET or on failed authentication.
    return render(request, 'todo/login.html', {
        'user': user
    })
def log_out(request):
    """End the current session and send the visitor back to the login page."""
    logout(request)
    login_url = reverse('todo:login')
    return HttpResponseRedirect(login_url)
def index(request):
    """Render the authenticated user's todos, sorted by their ``order`` field."""
    if not request.user.is_authenticated():
        return HttpResponseBadRequest('Please login from ' + reverse('todo:login'))
    context = {
        'todo_list': Todo.objects.filter(user=request.user).order_by('order'),
    }
    return render(request, 'todo/index.html', context)
@csrf_exempt  # NOTE(review): CSRF is disabled for this JSON endpoint -- confirm this is intentional.
@transaction.atomic
def save(request):
    """Apply a batch of todo mutations (create / reorder / delete) atomically.

    Expects a JSON body with keys ``newTodos`` (list of {todoText, order}),
    ``updateTodos`` (list of {todoId, order}) and ``deleteTodoIds``.
    Returns "ok" on success; the whole batch is one transaction.
    """
    if not request.user.is_authenticated():
        return HttpResponseBadRequest('Please login from ' + reverse('todo:login'))
    received_json_data = json.loads(request.body.decode('utf8'))
    # register new todos
    for new_todo in received_json_data['newTodos']:
        t = Todo(
            user=request.user,
            todo_text=new_todo['todoText'],
            order=new_todo['order'],
        )
        t.save()
    # update todo order -- queries are scoped to request.user so a user
    # cannot reorder another user's rows by guessing primary keys (IDOR).
    for todo in received_json_data['updateTodos']:
        t = Todo.objects.get(pk=todo['todoId'], user=request.user)
        t.order = todo['order']
        t.save()
    # delete todos -- same ownership scoping as above.
    for todo_id in received_json_data['deleteTodoIds']:
        Todo.objects.get(pk=todo_id, user=request.user).delete()
    return HttpResponse("ok")
class DetailView(generic.DetailView):
    """Read-only detail page for a single Todo (template: todo/detail.html)."""
    model = Todo
    template_name = 'todo/detail.html'
class EditView(generic.UpdateView):
    """Edit form for a Todo's text; redirects to the index on success."""
    model = Todo
    fields = ['todo_text']
    template_name = 'todo/edit.html'
    success_url = reverse_lazy('todo:index')
| {
"content_hash": "80ce444850ac7284344831f5d2c2654d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 83,
"avg_line_length": 29.83529411764706,
"alnum_prop": 0.6707413249211357,
"repo_name": "Lhacker/WebTodo",
"id": "db9eeaf42fa7da60ea7c1d3467e6dffb54b9ee02",
"size": "2536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webtodo/todo/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1239"
},
{
"name": "HTML",
"bytes": "7778"
},
{
"name": "Python",
"bytes": "7263"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen ``Item.item`` to a 500-char CharField."""

    dependencies = [
        ('bulkvote', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='item',
            field=models.CharField(max_length=500),
        ),
    ]
| {
"content_hash": "23c112bdbb005e3bc2090480b0ad4dd4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 20.27777777777778,
"alnum_prop": 0.5808219178082191,
"repo_name": "tykling/bulkvote",
"id": "65791a3aefaf8f4bd766359b9b40430d89d4d5b6",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bulkvote/migrations/0002_auto_20151102_2232.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18675"
},
{
"name": "HTML",
"bytes": "7377"
},
{
"name": "JavaScript",
"bytes": "80480"
},
{
"name": "Python",
"bytes": "11704"
}
],
"symlink_target": ""
} |
import unittest
from kombu_redis_priority.scheduling.prioritized_levels import \
HIGHEST_LEVEL, PrioritizedLevelsQueueScheduler
class TestPrioritizedLevelsQueueScheduler(unittest.TestCase):
    """Behavioral tests for PrioritizedLevelsQueueScheduler.

    The scheduler serves queues level by level: level 0 is consumed first,
    and a full *empty* rotation of a level makes it fall through to the next
    level, while consuming anything promotes it back toward level 0.
    """

    # Two priority levels: level 0 is served before level 1.
    BASE_CONFIG = {
        0: ['TimeMachine', 'FluxCapacitor'],
        1: ['1985', '1955', '2015']
    }

    def test_prioritized_levels_scheduler_gets_queue_at_top_of_lowest_level(self):
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        self.assertEqual(scheduler.next(), 'TimeMachine')

    def test_prioritized_levels_scheduler_next_with_empty(self):
        # With no queues left after update([]), next() yields nothing.
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        scheduler.update([])
        self.assertEqual(scheduler.next(), None)

    def test_prioritized_levels_scheduler_update_filters_out_queues_not_in_list(self):
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        scheduler.update(['TimeMachine'])
        self.assertEqual(scheduler.queue_config, {0: ['TimeMachine']})
        self.assertEqual(scheduler.current_level, 0)
        self.assertEqual(scheduler.queue_cycle.items, ['TimeMachine'])

    def test_prioritized_levels_scheduler_rotate_full_rotation_empty(self):
        # rotate(q, True) marks q empty; returns True only once every queue
        # in the current pass has come back empty.
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        queues = ['TimeMachine', 'FluxCapacitor', '1985', '1955']
        for q in queues:
            self.assertEqual(scheduler.next(), q)
            self.assertFalse(scheduler.rotate(q, True))
        self.assertEqual(scheduler.next(), '2015')
        self.assertTrue(scheduler.rotate('2015', True))

    def test_prioritized_levels_scheduler_jumps_on_empty_full_rotation(self):
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        # full empty rotation on level 0 causes scheduler to jump to next level
        self.assertEqual(scheduler.current_level, 0)
        for q in self.BASE_CONFIG[0]:
            self.assertEqual(scheduler.next(), q)
            self.assertFalse(scheduler.rotate(q, True))
        self.assertEqual(scheduler.current_level, 1)
        self.assertEqual(scheduler.next(), '1985')

    def test_prioritized_levels_scheduler_fully_rotates_level(self):
        # Non-empty rotations keep cycling within the same level.
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        scheduler._set_level(1)
        self.assertEqual(scheduler.next(), '1985')
        self.assertFalse(scheduler.rotate('1985', False))
        self.assertEqual(scheduler.next(), '1955')

    def test_prioritized_levels_scheduler_moves_to_lowest_level_when_consuming_higher_level_nonempty(self):
        config = self.BASE_CONFIG.copy()
        config[2] = ['Marty', 'Doc']
        scheduler = PrioritizedLevelsQueueScheduler(config)
        scheduler._set_level(1)
        # Consuming from level 1 sends the scheduler back to level 0.
        for q in config[1]:
            self.assertEqual(scheduler.next(), q)
            self.assertFalse(scheduler.rotate(q, False))
        self.assertEqual(scheduler.current_level, 0)
        self.assertEqual(scheduler.next(), 'TimeMachine')

    def test_prioritized_levels_scheduler_update_non_existant_queue(self):
        # Queues unknown to the config land at HIGHEST_LEVEL.
        scheduler = PrioritizedLevelsQueueScheduler(self.BASE_CONFIG)
        scheduler.update(['Marty'])
        self.assertEqual(scheduler.queue_config, {HIGHEST_LEVEL: ['Marty']})
| {
"content_hash": "2934e0f5efec0978b3397becb3d8299f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 107,
"avg_line_length": 46.7536231884058,
"alnum_prop": 0.689398636081835,
"repo_name": "Captricity/kombu-redis-priority",
"id": "5c769d855197eff874598c2da22b51126df83d61",
"size": "3226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scheduler/test_prioritized_levels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "63437"
}
],
"symlink_target": ""
} |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
class OrbitConan(ConanFile):
    """Conan recipe for the Orbit C/C++ performance profiler.

    Options:
      - system_mesa / system_qt: use the system-provided libraries instead of
        conan packages.
      - ggp: Google game platform build (skips running tests).
      - with_gui: build the Qt/OpenGL UI and pull its dependencies.
    """
    name = "OrbitProfiler"
    version = "0.0.1"
    license = "BSD-2-Clause"
    url = "https://github.com/pierricgimmig/orbitprofiler.git"
    description = "C/C++ Performance Profiler"
    settings = "os", "compiler", "build_type", "arch"
    generators = [ "cmake_find_package_multi", "cmake" ]
    options = {"system_mesa": [True, False], "ggp": [True, False],
               "system_qt": [True, False], "with_gui": [True, False]}
    default_options = {"system_mesa": True, "ggp": False,
                       "system_qt": True, "with_gui": True}
    # conan channel hosting Orbit's own dependency builds
    _orbit_channel = "orbitdeps/stable"
    exports_sources = "CMakeLists.txt", "Orbit*", "bin/*", "cmake/*", "external/*", "LICENSE"

    def requirements(self):
        """Declare dependencies; GUI/OS-specific ones are added conditionally."""
        # conan-built Qt needs conan-built mesa; reject the inconsistent combo early
        if self.settings.os != "Windows" and self.options.with_gui and not self.options.system_qt and self.options.system_mesa:
            raise ConanInvalidConfiguration("When disabling system_qt, you also have to "
                                            "disable system mesa.")
        self.requires("asio/1.12.2@bincrafters/stable")
        self.requires("abseil/20190808@{}".format(self._orbit_channel))
        self.requires("bzip2/1.0.8@conan/stable")
        self.requires("capstone/4.0.1@{}".format(self._orbit_channel))
        self.requires("cereal/1.3.0@{}".format(self._orbit_channel))
        self.requires("gtest/1.8.1@bincrafters/stable")
        self.requires("libcurl/7.66.0")
        self.requires("llvm_object/9.0.1@orbitdeps/stable")
        self.requires("openssl/1.1.1d@{}".format(self._orbit_channel))
        if self.settings.os != "Windows":
            self.requires("libunwindstack/80a734f14@{}".format(self._orbit_channel))
            self.requires("zlib/1.2.11@conan/stable")
        if self.settings.os == "Windows":
            self.requires("breakpad/216cea7b@{}".format(self._orbit_channel))
        if self.options.with_gui:
            self.requires("freeglut/3.2.1@{}".format(self._orbit_channel))
            self.requires("freetype/2.10.0@bincrafters/stable")
            self.requires("freetype-gl/8d9a97a@{}".format(self._orbit_channel))
            self.requires("glew/2.1.0@{}".format(self._orbit_channel))
            self.requires("imgui/1.69@bincrafters/stable")
            self.requires("libpng/1.6.37@bincrafters/stable")
            if not self.options.system_mesa:
                self.requires("libxi/1.7.10@bincrafters/stable")
            if not self.options.system_qt:
                self.requires("qt/5.14.1@bincrafters/stable")

    def configure(self):
        """Propagate Orbit's options into its dependencies' options."""
        self.options["abseil"].cxx_standard = 17
        if self.options.with_gui:
            self.options["glew"].system_mesa = self.options.system_mesa
            self.options["freeglut"].system_mesa = self.options.system_mesa
            if not self.options.system_qt:
                # trim the conan Qt build down to what the UI actually needs
                self.options["qt"].shared = True
                self.options["qt"].with_sqlite3 = False
                self.options["qt"].with_mysql = False
                self.options["qt"].with_pq = False
                self.options["qt"].with_odbc = False
                self.options["qt"].with_sdl2 = False
                self.options["qt"].with_openal = False
                if self.settings.os == "Windows":
                    self.options["qt"].qttools = True
                    self.options["qt"].with_glib = False
                    self.options["qt"].with_harfbuzz = False

    def build(self):
        """Configure and build via CMake; run the test suite except on ggp."""
        cmake = CMake(self)
        cmake.definitions["WITH_GUI"] = "ON" if self.options.with_gui else "OFF"
        cmake.configure()
        cmake.build()
        if not self.options.ggp:
            cmake.test()

    def imports(self):
        """Copy runtime artifacts (shared libs, fonts, shaders) next to the binaries."""
        dest = os.getenv("CONAN_IMPORT_PATH", "bin")
        self.copy("*.dll", src="@bindirs", dst=dest)
        self.copy("*.so*", src="@libdirs", dst=dest)
        if self.options.with_gui:
            # fonts/shaders are duplicated into OrbitQt/ for in-tree runs
            for path in self.deps_cpp_info["freetype-gl"].resdirs:
                self.copy("Vera.ttf", src=path, dst="{}/fonts/".format(dest))
                self.copy("Vera.ttf", src=path, dst="{}/fonts/".format("OrbitQt/"))
                self.copy("v3f-t2f-c4f.*", src=path, dst="{}/shaders/".format(dest))
                self.copy("v3f-t2f-c4f.*", src=path, dst="{}/shaders/".format("OrbitQt/"))

    def package(self):
        """Assemble the binary package from the build's bin/ directory."""
        self.copy("*", src="bin/dri", dst="bin/dri", symlinks=True)
        self.copy("*", src="bin/fonts", dst="bin/fonts", symlinks=True)
        self.copy("*", src="bin/shaders", dst="bin/shaders", symlinks=True)
        self.copy("*.so*", src="bin/", dst="bin", symlinks=True)
        if self.settings.os == "Windows":
            self.copy("*.dll", src="bin/", dst="bin", symlinks=True)
        self.copy("Orbit", src="bin/", dst="bin")
        self.copy("Orbit.exe", src="bin/", dst="bin")
        self.copy("Orbit.pdb", src="bin/", dst="bin")
        self.copy("OrbitService*", src="bin/", dst="bin")

    def deploy(self):
        """Deploy the packaged binaries into the consumer's bin/ directory."""
        self.copy("*", src="bin", dst="bin")
| {
"content_hash": "58bf24142e92e2196e425415e3d4454b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 127,
"avg_line_length": 48.094339622641506,
"alnum_prop": 0.5790506080816006,
"repo_name": "pierricgimmig/orbitprofiler",
"id": "7966fa8739563727e9e39fa6c44840d366dab0b1",
"size": "5098",
"binary": false,
"copies": "1",
"ref": "refs/heads/headless",
"path": "conanfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "5798"
},
{
"name": "Batchfile",
"bytes": "5600"
},
{
"name": "C",
"bytes": "105310"
},
{
"name": "C++",
"bytes": "1978191"
},
{
"name": "CMake",
"bytes": "55219"
},
{
"name": "Objective-C",
"bytes": "1392"
},
{
"name": "Python",
"bytes": "102532"
},
{
"name": "QMake",
"bytes": "1219"
},
{
"name": "Shell",
"bytes": "8737"
}
],
"symlink_target": ""
} |
"""One-shot entry point: run the CoralNet image classification task."""
# Explicit import instead of the original wildcard: only classify_wrapper is
# used, and ``import *`` hides where the name comes from and pollutes globals.
from CoralNet.images.tasks import classify_wrapper

# Kick off classification immediately, matching the original script behavior.
classify_wrapper()
| {
"content_hash": "dec0a78df7456b98159417cf2466af80",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 35,
"avg_line_length": 27.5,
"alnum_prop": 0.8,
"repo_name": "DevangS/CoralNet",
"id": "dc6fa6ae51728366fc06c6f67b6fb8961962ac7a",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classify_task.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "30089"
},
{
"name": "C++",
"bytes": "37023"
},
{
"name": "CSS",
"bytes": "85725"
},
{
"name": "HTML",
"bytes": "875721"
},
{
"name": "JavaScript",
"bytes": "406565"
},
{
"name": "Jupyter Notebook",
"bytes": "495187"
},
{
"name": "M",
"bytes": "1647"
},
{
"name": "Matlab",
"bytes": "774560"
},
{
"name": "Nginx",
"bytes": "749"
},
{
"name": "Objective-C",
"bytes": "702"
},
{
"name": "Python",
"bytes": "1293581"
},
{
"name": "Shell",
"bytes": "4490"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import hashlib
from helper.colorlogger import create_logger
from library.api.pgoapi.protos.POGOProtos import Enums_pb2 as Enums
from library.api.pgoapi.protos.POGOProtos.Inventory import \
Item_pb2 as Item_Enums
class Config(object):
    """Typed view over the poketrainer JSON config dict.

    Pulls every setting out of the nested ``config`` dict into flat
    attributes, applying defaults, and removes the password from the stored
    config data (retrievable only via :meth:`get_password`).
    """
    def __init__(self, config, cli_args):
        """
        :param config: parsed configuration dict (mutated: "password" is popped).
        :param cli_args: command-line argument dict; only 'location' is read here.
        """
        self.log = create_logger(__name__)
        # NOTE: both names alias the SAME dict object; pop() below affects both.
        self.__config_data = config
        self.config_data = config
        self.__password = self.config_data.pop("password", 'NA')
        # required account settings (KeyError if missing -- intentional)
        self.location = config["location"]
        self.auth_service = config["auth_service"]
        self.username = config["username"]
        self.gmaps_api_key = config.get("GMAPS_API_KEY", "")
        # BEHAVIOR: movement and timing
        self.step_size = config.get("BEHAVIOR", {}).get("STEP_SIZE", 200)
        self.wander_steps = config.get("BEHAVIOR", {}).get("WANDER_STEPS", 0)
        self.extra_wait = config.get("BEHAVIOR", {}).get("EXTRA_WAIT", 0.3)
        self.sleep_mult = config.get("BEHAVIOR", {}).get("SLEEP_MULT", 1.5)
        self.use_lucky_egg = config.get("BEHAVIOR", {}).get("AUTO_USE_LUCKY_EGG", False)
        self.use_google = config.get("BEHAVIOR", {}).get("USE_GOOGLE", False)
        self.skip_visited_fort_duration = config.get("BEHAVIOR", {}).get("SKIP_VISITED_FORT_DURATION", 600)
        self.spin_all_forts = config.get("BEHAVIOR", {}).get("SPIN_ALL_FORTS", False)
        self.stay_within_proximity = config.get("BEHAVIOR", {}).get("STAY_WITHIN_PROXIMITY",
                                                                    9999999)  # Stay within proximity
        # CAPTURE: pokemon catching behavior
        self.should_catch_pokemon = config.get("CAPTURE", {}).get("CATCH_POKEMON", True)
        self.max_catch_attempts = config.get("CAPTURE", {}).get("MAX_CATCH_ATTEMPTS", 10)
        self.min_failed_attempts_before_using_berry = config.get("CAPTURE", {}).get("MIN_FAILED_ATTEMPTS_BEFORE_USING_BERRY", 3)
        pokeball_percent = config.get("CAPTURE", {}).get("USE_POKEBALL_IF_PERCENT", 50)
        greatball_percent = config.get("CAPTURE", {}).get("USE_GREATBALL_IF_PERCENT", 50)
        ultraball_percent = config.get("CAPTURE", {}).get("USE_ULTRABALL_IF_PERCENT", 50)
        use_masterball = config.get("CAPTURE", {}).get("USE_MASTERBALL", False)
        # order matters: [pokeball, greatball, ultraball, masterball]
        self.ball_priorities = [pokeball_percent, greatball_percent, ultraball_percent, use_masterball]
        # map config item names onto protobuf enum values
        self.min_items = {}
        for k, v in config.get("MIN_ITEMS", {}).items():
            self.min_items[getattr(Item_Enums, k)] = v
        self.pokemon_evolution = {}
        self.pokemon_evolution_family = {}
        for k, v in config.get("POKEMON_EVOLUTION", {}).items():
            self.pokemon_evolution[getattr(Enums, k)] = v
            self.pokemon_evolution_family[getattr(Enums, k)] = getattr(Enums, "FAMILY_" + k)
        self.experimental = config.get("BEHAVIOR", {}).get("EXPERIMENTAL", False)
        # POKEMON_CLEANUP: release/evolve policy
        self.pokemon_cleanup_testing_mode = config.get('POKEMON_CLEANUP', {}).get('TESTING_MODE', False)
        self.min_similar_pokemon = config.get("POKEMON_CLEANUP", {}).get("MIN_SIMILAR_POKEMON",
                                                                         1)  # Keep atleast one of everything.
        # NOTE(review): under Python 3 this is a one-shot map iterator, not a
        # list -- confirm consumers only iterate it once.
        self.keep_pokemon_ids = map(lambda x: getattr(Enums, x),
                                    config.get("POKEMON_CLEANUP", {}).get("KEEP_POKEMON_NAMES", []))
        self.release_method = config.get("POKEMON_CLEANUP", {}).get("RELEASE_METHOD", "CLASSIC")
        self.release_method_conf = config.get("POKEMON_CLEANUP", {}).get("RELEASE_METHOD_" + self.release_method, {})
        self.score_method = config.get("POKEMON_CLEANUP", {}).get("SCORE_METHOD", "CP")
        self.score_settings = config.get("POKEMON_CLEANUP", {}).get("SCORE_METHOD_" + self.score_method, {})
        # EGG_INCUBATION
        self.egg_incubation_enabled = config.get("EGG_INCUBATION", {}).get("ENABLE", True)
        self.use_disposable_incubators = config.get("EGG_INCUBATION", {}).get("USE_DISPOSABLE_INCUBATORS", False)
        self.incubate_big_eggs_first = config.get("EGG_INCUBATION", {}).get("BIG_EGGS_FIRST", True)
        # NEEDY_ITEM_FARMING (experimental): go farm pokestops when low on balls
        self.farm_items_enabled = config.get("NEEDY_ITEM_FARMING", {}).get("ENABLE",
                                                                           True and self.experimental)  # be concious of pokeball/item limits
        # keep at least this many pokeballs of any assortment, otherwise go farming
        self.pokeball_continue_threshold = config.get("NEEDY_ITEM_FARMING", {}).get("POKEBALL_CONTINUE_THRESHOLD", 50)
        # at this point, go collect pokeballs
        self.pokeball_farm_threshold = config.get("NEEDY_ITEM_FARMING", {}).get("POKEBALL_FARM_THRESHOLD", 10)
        # which ball types to exclude from the "continue" tally
        self.farm_ignore_pokeball_count = config.get("NEEDY_ITEM_FARMING", {}).get("FARM_IGNORE_POKEBALL_COUNT", False)
        self.farm_ignore_greatball_count = config.get("NEEDY_ITEM_FARMING", {}).get("FARM_IGNORE_GREATBALL_COUNT", False)
        self.farm_ignore_ultraball_count = config.get("NEEDY_ITEM_FARMING", {}).get("FARM_IGNORE_ULTRABALL_COUNT", False)
        self.farm_ignore_masterball_count = config.get("NEEDY_ITEM_FARMING", {}).get("FARM_IGNORE_MASTERBALL_COUNT", True)
        # override step size while farming; -1 disables the override
        self.farm_override_step_size = config.get("NEEDY_ITEM_FARMING", {}).get("FARM_OVERRIDE_STEP_SIZE", -1)
        self._sanity_check_needy_item_farming()
        # CONSOLE_OUTPUT: verbosity toggles
        self.explain_evolution_before_cleanup = config.get("CONSOLE_OUTPUT", {}).get("EXPLAIN_EVOLUTION_BEFORE_CLEANUP", False)  # explain individual evolution criteria in console
        self.list_pokemon_before_cleanup = config.get("CONSOLE_OUTPUT", {}).get("LIST_POKEMON_BEFORE_CLEANUP", False)  # list pokemon in console
        self.list_inventory_before_cleanup = config.get("CONSOLE_OUTPUT", {}).get("LIST_INVENTORY_BEFORE_CLEANUP", True)  # list inventory in console
        self.show_steps = config.get("CONSOLE_OUTPUT", {}).get("SHOW_STEPS", True)  # show steps walked in console
        self.show_travel_link_with_steps = config.get("CONSOLE_OUTPUT", {}).get("SHOW_TRAVEL_LINK_WITH_STEPS", True)
        self.show_distance_traveled = config.get("CONSOLE_OUTPUT", {}).get("SHOW_DISTANCE_TRAVELED", True)
        self.show_nearest_fort_distance = config.get("CONSOLE_OUTPUT", {}).get("SHOW_NEAREST_FORT_DISTANCE", True)
        self.notify_no_nearby_pokemon = config.get("CONSOLE_OUTPUT", {}).get("NOTIFY_NO_NEARBY_POKEMON", False)
        self.log_colors = config.get("CONSOLE_OUTPUT", {}).get("COLORLOG",
                                                               {"FORT_WALKER": "blue",
                                                                "POKE_CATCHER": "green",
                                                                "RELEASE": "cyan",
                                                                "EVOLVE": "cyan",
                                                                "POKETRAINER": "yellow",
                                                                "INVENTORY": "purple"})
        # cache file name is keyed on the start location + proximity radius
        if cli_args['location']:
            start_location = cli_args['location']
        else:
            start_location = self.location
        self.cache_filename = './cache/cache ' + (hashlib.md5(start_location.encode())).hexdigest() + str(self.stay_within_proximity)
        self.use_cache = config.get("BEHAVIOR", {}).get("USE_CACHED_FORTS", False)
        self.cache_is_sorted = config.get("BEHAVIOR", {}).get("CACHED_FORTS_SORTED", False)
        self.enable_caching = config.get("BEHAVIOR", {}).get("ENABLE_CACHING", False)

    def _sanity_check_needy_item_farming(self):
        """Disable item farming when its option combination is contradictory."""
        # Sanity checking, farm_items is Experimental, and we needn't do this if we're farming anyway
        self.farm_items_enabled = (self.farm_items_enabled and
                                   self.experimental and
                                   self.should_catch_pokemon)
        if (self.farm_items_enabled and self.farm_ignore_pokeball_count and self.farm_ignore_greatball_count and self.farm_ignore_ultraball_count and self.farm_ignore_masterball_count):
            # every ball type ignored -> the farming trigger could never fire
            self.farm_items_enabled = False
            self.log.warn("FARM_ITEMS has been disabled due to all Pokeball counts being ignored.")
        elif self.farm_items_enabled and not self.pokeball_farm_threshold < self.pokeball_continue_threshold:
            # farm threshold must be strictly below the continue threshold
            self.farm_items_enabled = False
            self.log.warn("FARM_ITEMS has been disabled due to farming threshold being below the continue. " +
                          "Set 'CATCH_POKEMON' to 'false' to enable captureless traveling.")

    def get_password(self):
        """Return the account password (kept out of the stored config data)."""
        return self.__password
| {
"content_hash": "f57396f838aaaf8c7086daf6f60bdc90",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 185,
"avg_line_length": 72.23308270676692,
"alnum_prop": 0.5581346934526907,
"repo_name": "fallenpixel/poketrainer",
"id": "ea58b529f4ae2c1b5fa3133e0ca745dd986cd1bc",
"size": "9607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "poketrainer/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12395"
},
{
"name": "Protocol Buffer",
"bytes": "90347"
},
{
"name": "Python",
"bytes": "213386"
},
{
"name": "Shell",
"bytes": "316"
}
],
"symlink_target": ""
} |
import numpy as np
import itertools
class SNThermodynamics(object):
    """Initial supernova neutrino spectra and background-density profile.

    Holds the per-flavor luminosities, mean energies, spectral temperatures
    and degeneracy parameters, and precomputes the combined initial spectrum
    over an (energy, zenith-cosine) grid (negative energies encode
    antineutrinos).
    """
    def __init__(self):
        # per-flavor luminosities
        self.Le = 4.1 * np.power(10., 51.)
        self.Lebar = 4.3 * np.power(10., 51.)
        self.Lmu = 7.9 * np.power(10., 51.)
        # mean energies per flavor
        self.EeAvg = 9.4
        self.EeBarAvg = 13.
        self.EmuAvg = 15.8
        # number fluxes: luminosity / mean energy; mu counts 4 species
        self.FluxE = self.Le / self.EeAvg
        self.FluxEbar = self.Lebar / self.EeBarAvg
        self.FluxMu = self.Lmu / self.EmuAvg
        self.FluxTot = self.FluxE + self.FluxEbar + 4 * self.FluxMu
        # spectral temperatures and degeneracy (eta) parameters
        self.TE = 2.1
        self.TEbar = 3.5
        self.TMu = 4.4
        self.EtaE = 3.9
        self.EtaEbar = 2.3
        self.EtaMu = 2.1
        # background density normalization and geometry (R: emission radius,
        # RStar: starting radius of the evolution, deltaR: radial step)
        self.mu_0 = 0.45e5
        self.R = 10.
        self.deltaR = 0.25
        self.RStar = 40.
        # zenith-cosine grid: from the limb-tangent angle at RStar up to radial
        self.u_start = (1. - (self.R / self.RStar) ** 2) ** 0.5
        self.u_stop = 1.
        self.N = 20
        # energy grid (negative = antineutrinos)
        self.E_start = -70.
        self.E_stop = 70.
        self.delta_e = 0.2
        self.Spectra = self.spectra()
        self.x, self.u_prime, self.nu_e_dist, self.nu_mu_dist = np.array(list(zip(*self.Spectra)), dtype=np.float32)
        self.unique_cosine_prime = np.unique(self.u_prime)

    def f_nu_e(self, x):
        """
        Electron neutrino initial SN flux
        :param x: energy (+ for nu, - for anti nu)
        :return: initial energy spectrum (numpy array)
        """
        return (self.FluxE / self.FluxTot) * np.power(x / self.EeAvg, 2.) * \
            np.power(1 + np.exp(x / self.TE - self.EtaE), -1)

    def f_nu_e_bar(self, x):
        """
        Anti-electron neutrino initial SN flux
        :param x: energy (+ for nu, - for anti nu)
        :return: initial energy spectrum (numpy array)
        """
        return (self.FluxEbar / self.FluxTot) * np.power(x / self.EeBarAvg, 2.) * \
            np.power(1 + np.exp(x / self.TEbar - self.EtaEbar), -1)

    def f_nu_mu(self, x):
        """
        Muon / tau neutrino initial SN flux
        :param x: energy (+ for nu, - for anti nu)
        :return: initial energy spectrum (numpy array)
        """
        return (self.FluxMu / self.FluxTot) * np.power(x / self.EmuAvg, 2.) * \
            np.power(1 + np.exp(x / self.TMu - self.EtaMu), -1)

    def spectra(self):
        """
        :return: total initial spectrum - including all flavors - as a function of energy and cosine of zenith (array)
        """
        # negative energies: antineutrino branch (note the overall minus sign
        # on the nu_e_bar distribution)
        a = [(p, u, -self.f_nu_e_bar(-p), self.f_nu_mu(-p))
             for p, u in itertools.product(
                 np.linspace(start=self.E_start, stop=0.2, num=int((0 - self.E_start) / self.delta_e)),
                 np.linspace(start=self.u_start, stop=self.u_stop, num=self.N)
             )
             ]
        # positive energies: neutrino branch
        a.extend(
            [
                (p, u, self.f_nu_e(p), self.f_nu_mu(p))
                for p, u in itertools.product(
                    np.linspace(start=0.2, stop=self.E_stop, num=int(self.E_stop / self.delta_e)),
                    np.linspace(start=self.u_start, stop=self.u_stop, num=self.N)
                )
            ]
        )
        # zero-energy reference points for every cosine
        a.extend(
            [(0, u, self.f_nu_e(0), self.f_nu_mu(0)) for u in np.linspace(self.u_start, self.u_stop, self.N)]
        )
        return sorted(a)

    def mu(self, r):
        """
        Background neutrino density spectrum as a function of outward radius in the SN
        :param r: radius
        :return: Background neutrino density (float)
        """
        # FIX: explicit float literals -- under Python 2, ``4 / 3`` is integer
        # division and silently truncates the prefactor to 1 (a 25% error).
        return (4.0 / 3.0) * self.mu_0 * ((self.R / r) ** 3)
class Neutrino(SNThermodynamics):
    """Collective neutrino oscillation machinery on top of the SN spectra."""
    def __init__(self):
        super(Neutrino, self).__init__()

    @staticmethod
    def b_inverted():
        """
        B-vector in the inverted mass hierarchy.
        :return: 3rd and 8th components of B
        """
        # mixing angles and CPV phase
        c12 = np.cos(33.2 * np.pi/180)
        c23 = np.cos(40. * np.pi/180)
        c13 = np.cos(8.6 * np.pi/180)
        s12 = np.sin(33.2 * np.pi/180)
        s23 = np.sin(40. * np.pi/180)
        s13 = np.sin(8.6 * np.pi/180)
        c_cp = np.cos(300 * np.pi/180)
        s_cp = np.sin(300 * np.pi/180)
        # mass splitting ratio
        a = 7.5 * np.power(10., -5) / (7.5 * np.power(10., -5) - 2.43 * np.power(10., -3))
        # 3rd component of the vacuum B-vec
        b3 = s13 ** 2 - s23 ** 2 * c13 ** 2 + \
            a * (s12 ** 2 * c13 ** 2 - (c12 * c23 - s12 * s13 * s23 * c_cp) ** 2 - (s12 * s13 * s23 * s_cp) ** 2)
        # 8th component of the vacuum B-vec
        # NOTE(review): ``(2 / 3)`` truncates to 0 under Python 2 integer
        # division -- confirm this module only ever runs on Python 3.
        b8 = 3 ** 0.5 * (
            s13 ** 2 + s23 ** 2 * c13 ** 2 +
            a * (s12 ** 2 * c13 ** 2 + (c12 * c23 - s12 * s13 * s23 * c_cp)**2 + (s12 * s13 * s23 * s_cp) ** 2) -
            (2 / 3) * (1 + a)
        )
        return b3, b8

    def sine_func(self, energy_momentum, x_momentum, cosine, u_prime, n):
        """
        Computes sine function which appears inside of the main evolution integral. All coefficients are computed from
        various tensor contractions with the SU(3) structure tensor.
        :param energy_momentum: un-integrated neutrino energy (float)
        :param x_momentum: background neutrino energy to be integrated (float)
        :param cosine: un-integrate propagation cosine of zenith angle
        :param u_prime: cosine of zenith angle of background neutrino(s)
        :param n: integer which evolves the solution outward, radially in discrete steps
        :return: sine function expansion of vacuum oscillation terms (numpy array)
        """
        # first radial step has accumulated no phase yet
        if n == 1:
            return np.zeros_like(cosine)
        ratio = (1 / (energy_momentum * cosine) - 1 / (x_momentum * u_prime))
        # guard the division-by-zero at zero energy
        ratio[energy_momentum == 0] = 0
        d = (1 / cosine - u_prime) * (
            0.05119 * np.sin(0.3793 * (n - 1) * self.deltaR * ratio) +
            0.12250 * np.sin(11.895 * (n - 1) * self.deltaR * ratio) +
            0.05410 * np.sin(12.274 * (n - 1) * self.deltaR * ratio)
        )
        # NaNs can/should be set to 0 in this calculation due to the flux
        d[np.isnan(d)] = 0
        return d

    def euler(self, cosine, n, b_orthogonal, log_lam, integral):
        """
        Solution of one iteration of the Euler method for solving the differential equation of motion.
        :param cosine: un-integrate propagation cosine of zenith angle
        :param n: integer which evolves the solution outward, radially in discrete steps
        :param b_orthogonal: orthogonal component of the vacuum oscillation "magnetic field" vector in SU(3) space
        :param log_lam: logarithm of lambda function which controls collective oscillations
        :param integral: results of Riemannian integration at each step
        :return: solution to EOMs at each radius and angle
        """
        # trajectories outside the limb-tangent cone at this radius don't contribute
        if cosine < np.power(1. - np.power((self.R / (self.RStar + (n - 1) * self.deltaR)), 2), 0.5):
            return 0
        radius = self.RStar + (n - 1) * self.deltaR
        return log_lam - self.deltaR * self.mu(r=radius) / b_orthogonal * integral

    def make_tables(self, energy_momentum, cosine, n, log_lambda):
        """
        Creates array which will be integrated.
        :param energy_momentum: un-integrated neutrino energy (float)
        :param cosine: un-integrate propagation cosine of zenith angle
        :param n: integer which evolves the solution outward, radially in discrete steps
        :param log_lambda: logarithm of the lambda function from the previous solution
        :return: Integrated function (numpy array)
        """
        log_lam = log_lambda[:, -1]
        lambda_func = np.exp(log_lam)
        # Sets numpy arrays for the integrand
        spectra = (self.nu_e_dist - self.nu_mu_dist) * lambda_func * self.sine_func(
            energy_momentum=energy_momentum,
            x_momentum=self.x,
            cosine=cosine,
            u_prime=self.u_prime,
            n=n
        )
        # reshape the spectra with cosine steps across the columns,
        # this is preparing to integrate separately along each dimension
        integrand_array = spectra.reshape(-1, self.N)
        # Performs first integral over cosine by performing the trapezoidal algorithm over each row of the matrix.
        cos_int = np.trapz(integrand_array, x=self.unique_cosine_prime, axis=1)
        assert isinstance(cos_int, np.ndarray)
        cos_int[np.isnan(cos_int)] = 0
        # second (outer) trapezoidal integral over the energy grid (step 0.2)
        return np.trapz(cos_int, dx=0.2)
| {
"content_hash": "00bd63899404bf3bbb07dcd205266205",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 119,
"avg_line_length": 37.123893805309734,
"alnum_prop": 0.5567342073897497,
"repo_name": "brainsqueeze/bipolar_osc",
"id": "de2d93bdfc0e8cec45418b89661fc3944d4282db",
"size": "8390",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/workers/sn_neutrino.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12611"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from api.models import GameRoom, Player, Question, Answer
class GameRoomSerializer(serializers.ModelSerializer):
    """Serializer for GameRoom objects.

    The room password must never be echoed back to clients, so it is marked
    write-only. The default validators on ``name`` are disabled so that
    nested payloads referencing an existing room do not fail uniqueness
    validation.
    """
    class Meta:
        model = GameRoom
        fields = '__all__'
        # BUG FIX: the original declared ``write_only_fields = ('password')``,
        # which is a plain string (missing trailing comma), and Meta-level
        # ``write_only_fields`` is deprecated/unsupported in DRF 3 anyway.
        # The supported spelling is extra_kwargs with 'write_only': True.
        extra_kwargs = {
            'name': {
                'validators': []
            },
            'password': {
                'write_only': True
            },
        }
class PlayerSerializer(serializers.ModelSerializer):
    """Serializer for Player objects with the game room nested inline."""
    game_room = GameRoomSerializer()  # nested representation of the room

    def create(self, validated_data):
        """Look up the referenced room, then create the player inside it."""
        room_fields = validated_data.pop('game_room')
        room = GameRoom.objects.get(**room_fields)
        return Player.objects.create(game_room=room, **validated_data)

    class Meta:
        model = Player
        exclude = ('question_master', 'answer_detective')
        depth = 1
class QuestionSerializer(serializers.ModelSerializer):
    # The creating player is referenced by primary key only (no nesting).
    creator = serializers.PrimaryKeyRelatedField(queryset=Player.objects.all())
    class Meta:
        model = Question
        fields = '__all__'
class AnswerSerializer(serializers.ModelSerializer):
    # Flat serializer: both relations are referenced by primary key.
    creator = serializers.PrimaryKeyRelatedField(queryset=Player.objects.all())
    question = serializers.PrimaryKeyRelatedField(queryset=Question.objects.all())
    class Meta:
        model = Answer
        fields = '__all__'
class AnswerSerializerDepth(serializers.ModelSerializer):
    # Read-oriented variant of AnswerSerializer: the creating Player is
    # nested in full (including its game room) instead of a primary key.
    creator = PlayerSerializer()
    question = serializers.PrimaryKeyRelatedField(queryset=Question.objects.all())
    class Meta:
        model = Answer
        fields = '__all__'
depth = 1 | {
"content_hash": "5f05cb0a0a3ecdf301ca92d42f7574ce",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 104,
"avg_line_length": 29.037037037037038,
"alnum_prop": 0.6651785714285714,
"repo_name": "kevincianfarini/whosaidit",
"id": "982fc3318cdc97d453ffb6604b05685e5aa3d5bc",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37350"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def randomColor():
    """Return a random BGR color as a list of three ints in [0, 255)."""
    return np.random.randint(0, 255, 3).tolist()
def perspectiveCorrection(img1Path, img2Path ,patternSize ):
    """Estimate the homography between two chessboard views and display
    both the warped source image and the corner correspondences.

    :param img1Path: path of the source image (view to be warped)
    :param img2Path: path of the destination image (desired view)
    :param patternSize: (columns, rows) of inner chessboard corners
    Exits the process with code -1 if the chessboard is not found in both
    images. Blocks on cv.waitKey(0) until a key is pressed.
    """
    img1 = cv.imread(cv.samples.findFile(img1Path))
    img2 = cv.imread(cv.samples.findFile(img2Path))
    # [find-corners]
    ret1, corners1 = cv.findChessboardCorners(img1, patternSize)
    ret2, corners2 = cv.findChessboardCorners(img2, patternSize)
    # [find-corners]
    if not ret1 or not ret2:
        print("Error, cannot find the chessboard corners in both images.")
        sys.exit(-1)
    # [estimate-homography]
    H, _ = cv.findHomography(corners1, corners2)
    print(H)
    # [estimate-homography]
    # [warp-chessboard]
    # Warp img1 into img2's view; output keeps img1's size (width, height).
    img1_warp = cv.warpPerspective(img1, H, (img1.shape[1], img1.shape[0]))
    # [warp-chessboard]
    img_draw_warp = cv.hconcat([img2, img1_warp])
    cv.imshow("Desired chessboard view / Warped source chessboard view", img_draw_warp )
    # Flatten the (N, 1, 2) corner array into a plain list of [x, y] pairs.
    corners1 = corners1.tolist()
    corners1 = [a[0] for a in corners1]
    # [compute-transformed-corners]
    img_draw_matches = cv.hconcat([img1, img2])
    for i in range(len(corners1)):
        # Map each source corner through H (homogeneous coordinates).
        pt1 = np.array([corners1[i][0], corners1[i][1], 1])
        pt1 = pt1.reshape(3, 1)
        pt2 = np.dot(H, pt1)
        pt2 = pt2/pt2[2]
        # Offset x by img1's width because img2 sits right of img1 in the
        # concatenated canvas.
        # NOTE(review): int() on a 1-element ndarray is deprecated in newer
        # NumPy; pt2[0] is a (1,) slice here — confirm against NumPy version.
        end = (int(img1.shape[1] + pt2[0]), int(pt2[1]))
        cv.line(img_draw_matches, tuple([int(j) for j in corners1[i]]), end, randomColor(), 2)
    cv.imshow("Draw matches", img_draw_matches)
    cv.waitKey(0)
    # [compute-transformed-corners]
def main():
    """Parse command-line arguments and run the perspective-correction demo."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-I1', "--image1", help="Path to the first image", default="left02.jpg")
    parser.add_argument('-I2', "--image2", help="Path to the second image", default="left01.jpg")
    # BUG FIX: without type=int, values supplied on the command line arrive
    # as strings, and (w, h) would not be a valid chessboard pattern size
    # for cv.findChessboardCorners. The defaults were already ints.
    parser.add_argument('-H', "--height", help="Height of pattern size", type=int, default=6)
    parser.add_argument('-W', "--width", help="Width of pattern size", type=int, default=9)
    args = parser.parse_args()
    img1Path = args.image1
    img2Path = args.image2
    h = args.height
    w = args.width
    perspectiveCorrection(img1Path, img2Path, (w, h))
# Allow running this sample directly from the command line.
if __name__ == "__main__":
    main()
| {
"content_hash": "3079a59bbb3b953b6c337a151a8a9afb",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 97,
"avg_line_length": 32.52857142857143,
"alnum_prop": 0.6416337285902504,
"repo_name": "opencv/opencv",
"id": "184c44efd866cd5933e2ab01346e2bd2d594072d",
"size": "2351",
"binary": false,
"copies": "2",
"ref": "refs/heads/4.x",
"path": "samples/python/tutorial_code/features2D/Homography/perspective_correction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "1986"
},
{
"name": "Batchfile",
"bytes": "1498"
},
{
"name": "C",
"bytes": "1543870"
},
{
"name": "C++",
"bytes": "35975082"
},
{
"name": "CMake",
"bytes": "1010867"
},
{
"name": "Cuda",
"bytes": "333437"
},
{
"name": "Dockerfile",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "40027"
},
{
"name": "Java",
"bytes": "774232"
},
{
"name": "JavaScript",
"bytes": "233673"
},
{
"name": "Kotlin",
"bytes": "5204"
},
{
"name": "Objective-C",
"bytes": "100731"
},
{
"name": "Objective-C++",
"bytes": "392600"
},
{
"name": "Perl",
"bytes": "15865"
},
{
"name": "PowerShell",
"bytes": "14591"
},
{
"name": "Prolog",
"bytes": "843"
},
{
"name": "Python",
"bytes": "1038154"
},
{
"name": "Shell",
"bytes": "22738"
},
{
"name": "Swift",
"bytes": "301765"
},
{
"name": "TeX",
"bytes": "3530"
}
],
"symlink_target": ""
} |
import copy
import unittest
from scrapy.exceptions import DropItem
from FourmiCrawler import pipelines, spider, items
class TestPipelines(unittest.TestCase):
    """Unit tests for the FourmiCrawler item pipelines."""

    def setUp(self):
        # Fresh empty Result item for every test.
        self.testItem = items.Result()

    def test_none_pipeline(self):
        # Testing the pipeline that replaces the None values in items.
        self.testItem["value"] = "abc"
        self.testItem["source"] = None
        pipe = pipelines.RemoveNonePipeline()
        processed = pipe.process_item(self.testItem, spider.FourmiSpider())
        self.assertEqual(processed["value"], "abc")
        for key in self.testItem:
            self.assertIsNotNone(processed[key])
            # BUG FIX: the original used `key is not "value"`, an identity
            # comparison against a string literal (SyntaxWarning on
            # CPython >= 3.8 and unreliable in general); equality is meant.
            if key != "value":
                self.assertIs(processed[key], "")

    def test_duplicate_pipeline(self):
        # Testing the pipeline that removes duplicates.
        self.testItem["attribute"] = "test"
        self.testItem["value"] = "test"
        self.testItem["conditions"] = "test"
        pipe = pipelines.DuplicatePipeline()
        # First occurrence passes through; the identical item is dropped.
        self.assertEqual(pipe.process_item(self.testItem, spider.FourmiSpider()), self.testItem)
        self.assertRaises(DropItem, pipe.process_item, self.testItem, spider.FourmiSpider())
        # A differing value makes it a distinct item again.
        other_item = copy.deepcopy(self.testItem)
        other_item["value"] = "test1"
        self.assertEqual(pipe.process_item(other_item, spider.FourmiSpider()), other_item)

    def test_attribute_selection(self):
        # Testing the pipeline that selects attributes by regex pattern.
        item1 = copy.deepcopy(self.testItem)
        item2 = copy.deepcopy(self.testItem)
        item1["attribute"] = "abd"
        item2["attribute"] = "abc"
        # "a.d" matches "abd" but not "abc".
        s = spider.FourmiSpider(selected_attributes=["a.d"])
        pipe = pipelines.AttributeSelectionPipeline()
        self.assertEqual(pipe.process_item(item1, s), item1)
        self.assertRaises(DropItem, pipe.process_item, item2, s)
"content_hash": "fbb62dc5b0eee70dc453aa43db17b6ea",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 96,
"avg_line_length": 35.698113207547166,
"alnum_prop": 0.6569767441860465,
"repo_name": "jjdekker/Fourmi",
"id": "eb2b0702b33bb943d336e38055a623644e92b688",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74859"
}
],
"symlink_target": ""
} |
'''
detect cyclic parent/child, and insert weakref
'''
# NOTE: this file is Rusthon (a Python-like transpiler dialect), not plain
# Python: `children:[]Child` typed-list annotations, `push_back`, `string`
# and statement-form `print` are Rusthon syntax.
class Parent:
    def __init__(self, y:int, children:[]Child ):
        self.children = children
        self.y = y
    def create_child(self, x:int, parent:Parent) ->Child:
        # The child keeps a back-reference to its parent; the transpiler is
        # expected to detect the parent<->child cycle and insert a weakref.
        child = Child(x, parent)
        self.children.push_back( child )
        return child
    def say(self, msg:string):
        print(msg)
class Child:
    def __init__(self, x:int, parent:Parent ):
        self.x = x
        # Back-reference completing the Parent<->Child cycle; stored by the
        # transpiler as a weak reference (see module docstring).
        self.parent = parent
    def foo(self) ->int:
        '''
        It is also valid to use `par=self.parent`,
        but it is more clear to use `weakref.unwrap(self.parent)`
        '''
        # Promote the weakref once, then use the strong reference.
        par = weak.unwrap(self.parent)
        if par is not None:
            return self.x * par.y
        else:
            print('parent is gone..')
    def bar(self):
        '''
        below `self.parent` is directly used in expressions,
        and not first assigned to a variable.
        for each use of self.parent the weakref will be promoted
        to a shared pointer, and then fall out of scope,
        which is slower than above.
        '''
        self.parent.say('hello parent')
        print(self.parent.y)
def main():
    # Rusthon syntax: `[]Child()` constructs an empty typed list of Child.
    #children = []Child(None,None)
    children = []Child()
    p = Parent( 1000, children )
    print 'parent:', p
    c1 = p.create_child(1, p)
    c2 = p.create_child(2, p)
    c3 = p.create_child(3, p)
    print 'children:'
    print c1
    print c2
print c3 | {
"content_hash": "e1fe2df0609813c0cc53ab104218c9c3",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 59,
"avg_line_length": 21.964912280701753,
"alnum_prop": 0.65814696485623,
"repo_name": "tempbottle/Rusthon",
"id": "ffe3a4e926d1b161f41462481485f0413aab32e4",
"size": "1252",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "regtests/c++/cyclic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "JavaScript",
"bytes": "29887"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "1024366"
}
],
"symlink_target": ""
} |
# This module is a library: refuse to be executed directly and print a
# helpful message instead.
if __name__ != "__main__":
    # place library initialization code here
    pass
else:
    from lib.Functions import Exit
    Exit.printThisIsNoExecutableFile("The PoC-Library - Python Module Simulator.vSimSimulator")
# load dependencies
from pathlib import Path
from Base.Exceptions import *
from Simulator.Base import PoCSimulator
from Simulator.Exceptions import *
class Simulator(PoCSimulator):
    """ModelSim/QuestaSim ("vSim") front-end for PoC testbenches.

    Compiles every VHDL file listed in a testbench's filelist with
    vlib/vcom, then runs the simulation with vsim — in batch mode (checking
    the result line in the log) or interactively in GUI mode.
    """
    __executables = {}       # tool name -> platform-specific binary name
    __vhdlStandard = "93"    # one of "87", "93", "02", "08"
    __guiMode = False        # True: launch the vsim GUI; False: batch run
    def __init__(self, host, showLogs, showReport, vhdlStandard, guiMode):
        """Store options and resolve the platform-specific tool binaries."""
        super(self.__class__, self).__init__(host, showLogs, showReport)
        self.__vhdlStandard = vhdlStandard
        self.__guiMode = guiMode
        if (host.platform == "Windows"):
            self.__executables['vlib'] = "vlib.exe"
            self.__executables['vcom'] = "vcom.exe"
            self.__executables['vsim'] = "vsim.exe"
        elif (host.platform == "Linux"):
            self.__executables['vlib'] = "vlib"
            self.__executables['vcom'] = "vcom"
            self.__executables['vsim'] = "vsim"
        else:
            raise PlatformNotSupportedException(self.platform)
    def run(self, pocEntity):
        """Compile and simulate the testbench configured for *pocEntity*.

        Reads the entity's section from the testbench configuration, compiles
        each matching VHDL file from the filelist, then invokes vsim.
        Raises SimulatorException when the entity or a source file is missing
        and TestbenchException when no result line is found in the output.
        """
        import os
        import re
        import subprocess
        self.printNonQuiet(str(pocEntity))
        self.printNonQuiet("  preparing simulation environment...")
        # create temporary directory for vSim if not existent
        tempvSimPath = self.host.directories["vSimTemp"]
        if not (tempvSimPath).exists():
            self.printVerbose("Creating temporary directory for simulator files.")
            self.printDebug("Temporary directors: %s" % str(tempvSimPath))
            tempvSimPath.mkdir(parents=True)
        # setup all needed paths to execute fuse
        vLibExecutablePath = self.host.directories["vSimBinary"] / self.__executables['vlib']
        vComExecutablePath = self.host.directories["vSimBinary"] / self.__executables['vcom']
        vSimExecutablePath = self.host.directories["vSimBinary"] / self.__executables['vsim']
        # gtkwExecutablePath = self.host.directories["GTKWBinary"] / self.__executables['gtkwave']
        if not self.host.tbConfig.has_section(str(pocEntity)):
            from configparser import NoSectionError
            raise SimulatorException("Testbench '" + str(pocEntity) + "' not found.") from NoSectionError(str(pocEntity))
        # Resolve all per-testbench paths from the configuration section.
        testbenchName = self.host.tbConfig[str(pocEntity)]['TestbenchModule']
        fileListFilePath = self.host.directories["PoCRoot"] / self.host.tbConfig[str(pocEntity)]['fileListFile']
        tclBatchFilePath = self.host.directories["PoCRoot"] / self.host.tbConfig[str(pocEntity)]['vSimBatchScript']
        tclGUIFilePath = self.host.directories["PoCRoot"] / self.host.tbConfig[str(pocEntity)]['vSimGUIScript']
        tclWaveFilePath = self.host.directories["PoCRoot"] / self.host.tbConfig[str(pocEntity)]['vSimWaveScript']
        # vcdFilePath = tempvSimPath / (testbenchName + ".vcd")
        # gtkwSaveFilePath = self.host.directories["PoCRoot"] / self.host.tbConfig[str(pocEntity)]['gtkwaveSaveFile']
        if (self.verbose):
            print("  Commands to be run:")
            print("  1. Change working directory to temporary directory")
            print("  2. Parse filelist file.")
            print("    a) For every file: Add the VHDL file to vSim's compile cache.")
            if (self.host.platform == "Windows"):
                print("  3. Compile and run simulation")
            elif (self.host.platform == "Linux"):
                print("  3. Compile simulation")
                print("  4. Run simulation")
            print("  ----------------------------------------")
        # change working directory to temporary iSim path
        self.printVerbose('  cd "%s"' % str(tempvSimPath))
        os.chdir(str(tempvSimPath))
        # parse project filelist
        filesLineRegExpStr = r"\s*(?P<Keyword>(vhdl(\-(87|93|02|08))?|xilinx))"  # Keywords: vhdl[-nn], xilinx
        filesLineRegExpStr += r"\s+(?P<VHDLLibrary>[_a-zA-Z0-9]+)"  # VHDL library name
        filesLineRegExpStr += r"\s+\"(?P<VHDLFile>.*?)\""  # VHDL filename without "-signs
        filesLineRegExp = re.compile(filesLineRegExpStr)
        self.printDebug("Reading filelist '%s'" % str(fileListFilePath))
        self.printNonQuiet("  running analysis for every vhdl file...")
        # add empty line if logs are enabled
        if self.showLogs: print()
        vhdlLibraries = {}
        with fileListFilePath.open('r') as fileFileHandle:
            for line in fileFileHandle:
                filesLineRegExpMatch = filesLineRegExp.match(line)
                if (filesLineRegExpMatch is not None):
                    # NOTE(review): for "xilinx" lines and "vhdl-XX" lines whose
                    # standard does not match, vhdlFilePath keeps the value of a
                    # *previous* iteration (or is unbound on the first line) yet
                    # the vlib/vcom steps below still run — this looks like a
                    # missing `continue`; confirm intended behaviour.
                    if (filesLineRegExpMatch.group('Keyword') == "vhdl"):
                        vhdlFileName = filesLineRegExpMatch.group('VHDLFile')
                        vhdlFilePath = self.host.directories["PoCRoot"] / vhdlFileName
                    elif (filesLineRegExpMatch.group('Keyword')[0:5] == "vhdl-"):
                        if (filesLineRegExpMatch.group('Keyword')[-2:] == self.__vhdlStandard):
                            vhdlFileName = filesLineRegExpMatch.group('VHDLFile')
                            vhdlFilePath = self.host.directories["PoCRoot"] / vhdlFileName
                    elif (filesLineRegExpMatch.group('Keyword') == "xilinx"):
                        self.printVerbose("  skipped xilinx specific file: '%s'" % filesLineRegExpMatch.group('VHDLFile'))
                    vhdlLibraryName = filesLineRegExpMatch.group('VHDLLibrary')
                    # NOTE(review): vhdlLibraries is never written to, so this
                    # guard is always true and vlib is invoked once per file;
                    # likely a missing `vhdlLibraries[vhdlLibraryName] = True`.
                    if (not vhdlLibraries.__contains__(vhdlLibraryName)):
                        # assemble vlib command as list of parameters
                        parameterList = [str(vLibExecutablePath), vhdlLibraryName]
                        command = " ".join(parameterList)
                        self.printDebug("call vlib: %s" % str(parameterList))
                        self.printVerbose("    command: %s" % command)
                        try:
                            vLibLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, shell=False, universal_newlines=True)
                        except subprocess.CalledProcessError as ex:
                            # vlib failure is reported but not fatal here.
                            print("ERROR while executing vlib: %s" % str(vhdlFilePath))
                            print("Return Code: %i" % ex.returncode)
                            print("--------------------------------------------------------------------------------")
                            print(ex.output)
                        if self.showLogs:
                            if (vLibLog != ""):
                                print("vlib messages for : %s" % str(vhdlFilePath))
                                print("--------------------------------------------------------------------------------")
                                print(vLibLog)
                    #
                    if (not vhdlFilePath.exists()):
                        raise SimulatorException("Can not compile '" + vhdlFileName + "'.") from FileNotFoundError(str(vhdlFilePath))
                    # Map the configured VHDL standard to the vcom switch.
                    # NOTE(review): vhdlStandard stays unbound for any other
                    # value of self.__vhdlStandard and would raise a
                    # NameError below — confirm the accepted value set.
                    if (self.__vhdlStandard == "87"):
                        vhdlStandard = "-87"
                    elif (self.__vhdlStandard == "93"):
                        vhdlStandard = "-93"
                    elif (self.__vhdlStandard == "02"):
                        vhdlStandard = "-2002"
                    elif (self.__vhdlStandard == "08"):
                        vhdlStandard = "-2008"
                    # assemble vcom command as list of parameters
                    parameterList = [
                        str(vComExecutablePath),
                        '-rangecheck',
                        '-l', 'vcom.log',
                        vhdlStandard,
                        '-work', vhdlLibraryName,
                        str(vhdlFilePath)
                    ]
                    command = " ".join(parameterList)
                    self.printDebug("call vcom: %s" % str(parameterList))
                    self.printVerbose("    command: %s" % command)
                    try:
                        vComLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, shell=False, universal_newlines=True)
                    except subprocess.CalledProcessError as ex:
                        print("ERROR while executing vcom: %s" % str(vhdlFilePath))
                        print("Return Code: %i" % ex.returncode)
                        print("--------------------------------------------------------------------------------")
                        print(ex.output)
                    if self.showLogs:
                        if (vComLog != ""):
                            print("vcom messages for : %s" % str(vhdlFilePath))
                            print("--------------------------------------------------------------------------------")
                            print(vComLog)
        # running simulation
        # ==========================================================================
        simulatorLog = ""
        # run vSim simulation on Windows
        self.printNonQuiet("  running simulation...")
        parameterList = [
            str(vSimExecutablePath),
            '-vopt',
            '-t', '1fs',
        ]
        # append RUNOPTS to save simulation results to *.vcd file
        if (self.__guiMode):
            parameterList += ['-title', testbenchName]
            if (tclWaveFilePath.exists()):
                self.printDebug("Found waveform script: '%s'" % str(tclWaveFilePath))
                parameterList += ['-do', ('do {%s}; do {%s}' % (str(tclWaveFilePath), str(tclGUIFilePath)))]
            else:
                self.printDebug("Didn't find waveform script: '%s'. Loading default commands." % str(tclWaveFilePath))
                parameterList += ['-do', ('add wave *; do {%s}' % str(tclGUIFilePath))]
        else:
            # Batch mode: run headless with the configured Tcl batch script.
            parameterList += [
                '-c',
                '-do', str(tclBatchFilePath)
            ]
        # append testbench name
        parameterList += [('test.%s' % testbenchName)]
        command = " ".join(parameterList)
        self.printDebug("call vsim: %s" % str(parameterList))
        self.printVerbose("    command: %s" % command)
        try:
            simulatorLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, shell=False, universal_newlines=True)
        except subprocess.CalledProcessError as ex:
            print("ERROR while executing vsim command: %s" % command)
            print("Return Code: %i" % ex.returncode)
            print("--------------------------------------------------------------------------------")
            print(ex.output)
        #
        if self.showLogs:
            if (simulatorLog != ""):
                # NOTE(review): vhdlFilePath here refers to the last compiled
                # file, not the simulation — probably a copy/paste leftover.
                print("vsim messages for : %s" % str(vhdlFilePath))
                print("--------------------------------------------------------------------------------")
                print(simulatorLog)
                print()
        if (not self.__guiMode):
            # Batch mode: scan the log for the PASSED/FAILED result line.
            try:
                result = self.checkSimulatorOutput(simulatorLog)
                if (result == True):
                    print("Testbench '%s': PASSED" % testbenchName)
                else:
                    print("Testbench '%s': FAILED" % testbenchName)
            except SimulatorException as ex:
                raise TestbenchException("PoC.ns.module", testbenchName, "'SIMULATION RESULT = [PASSED|FAILED]' not found in simulator output.") from ex
        # else: # guiMode
        #     # run GTKWave GUI
        #     self.printNonQuiet("  launching GTKWave...")
        #
        #     parameterList = [
        #         str(gtkwExecutablePath),
        #         ('--dump=%s' % vcdFilePath)
        #     ]
        #
        #     # if GTKWave savefile exists, load it's settings
        #     if gtkwSaveFilePath.exists():
        #         parameterList += ['--save', str(gtkwSaveFilePath)]
        #
        #     command = " ".join(parameterList)
        #
        #     self.printDebug("call GTKWave: %s" % str(parameterList))
        #     self.printVerbose("    command: %s" % command)
        #     try:
        #         gtkwLog = subprocess.check_output(parameterList, stderr=subprocess.STDOUT, shell=False, universal_newlines=True)
        #     except subprocess.CalledProcessError as ex:
        #         print("ERROR while executing GTKWave command: %s" % command)
        #         print("Return Code: %i" % ex.returncode)
        #         print("--------------------------------------------------------------------------------")
        #         print(ex.output)
        #     #
        #     if self.showLogs:
        #         if (gtkwLog != ""):
        #             print("GTKWave messages:")
        #             print("--------------------------------------------------------------------------------")
        #             print(gtkwLog)
| {
"content_hash": "202be00f805c439a98c3bd3b182813c7",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 140,
"avg_line_length": 38.83393501805054,
"alnum_prop": 0.6146695175234731,
"repo_name": "hoangt/PoC",
"id": "8836f72ab5c371ebc9f9bd8544665f21d2cbd607",
"size": "11999",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/Simulator/QuestaSimulator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "16745"
},
{
"name": "Python",
"bytes": "176923"
},
{
"name": "Shell",
"bytes": "15438"
},
{
"name": "Tcl",
"bytes": "100"
},
{
"name": "VHDL",
"bytes": "847597"
}
],
"symlink_target": ""
} |
"""Config flow for Nightscout integration."""
from asyncio import TimeoutError as AsyncIOTimeoutError
import logging
from aiohttp import ClientError, ClientResponseError
from py_nightscout import Api as NightscoutAPI
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_API_KEY, CONF_URL
from .const import DOMAIN
from .utils import hash_from_url
_LOGGER = logging.getLogger(__name__)
# The server URL is mandatory; the API secret is optional because open
# (unauthenticated) Nightscout instances exist.
DATA_SCHEMA = vol.Schema({vol.Required(CONF_URL): str, vol.Optional(CONF_API_KEY): str})
async def _validate_input(data):
    """Validate that the user input allows us to connect.

    Returns a dict with the config-entry title on success; raises
    InputValidationError carrying the form error key ("invalid_auth" or
    "cannot_connect") on failure.
    """
    try:
        client = NightscoutAPI(data[CONF_URL], api_secret=data.get(CONF_API_KEY))
        status = await client.get_server_status()
        # Status-only deployments answer the status call even without a valid
        # secret, so additionally fetch glucose values to prove real access.
        if status.settings.get("authDefaultRoles") == "status-only":
            await client.get_sgvs()
    except ClientResponseError as error:
        raise InputValidationError("invalid_auth") from error
    except (ClientError, AsyncIOTimeoutError, OSError) as error:
        raise InputValidationError("cannot_connect") from error
    # Info to be stored in the config entry.
    return {"title": status.name}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Nightscout."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        errors = {}
        if user_input is not None:
            # One config entry per Nightscout server: derive a stable unique
            # id from the URL and abort if it is already configured.
            unique_id = hash_from_url(user_input[CONF_URL])
            await self.async_set_unique_id(unique_id)
            self._abort_if_unique_id_configured()
            try:
                info = await _validate_input(user_input)
            except InputValidationError as error:
                # Known validation failure: surface its error key in the form.
                errors["base"] = error.base
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self.async_create_entry(title=info["title"], data=user_input)
        # First visit, or validation failed: (re-)show the form.
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors
        )
class InputValidationError(exceptions.HomeAssistantError):
    """Raised when user-supplied input fails validation.

    ``base`` carries the form error key ("invalid_auth"/"cannot_connect")
    that the config flow displays.
    """

    def __init__(self, base: str):
        """Remember the error key and initialize the base exception."""
        self.base = base
        super().__init__()
| {
"content_hash": "9a489a19894805ed93acf0b9b77f1b19",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 88,
"avg_line_length": 34.68493150684932,
"alnum_prop": 0.6571879936808847,
"repo_name": "adrienbrault/home-assistant",
"id": "2b91395d37733ff8d1d43778108bad74e9c99ca9",
"size": "2532",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/nightscout/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.