| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import string, cgi, time, requests
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
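# NOTE (added comment, not in the original file): this module targets Python 2;
# BaseHTTPServer and the bare print statements below would need porting to
# http.server and print() to run on Python 3.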
# import pri
class MyHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
if self.path.endswith(".api"):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
if self.path.endswith("init.api"):
r = requests.post('https://plex.tv/users/sign_in.xml',
data={'user[login]': 'jschuler99@gmail.com', 'user[password]': '%3PeAIO$d&l9'},
headers={'X-Plex-Client-Identifier': 'HDKGKLS-FDHSKL-HSJDLFLJDKS',
'X-Plex-Product': 'ACDMedia', 'X-Plex-Version': '1.0'})
resp = r.text
self.wfile.write(resp)
return
if self.path.endswith(".html"):
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".ico"):
f = open(self.path[1:])
self.send_response(200)
self.send_header('Content-type', 'image/x-icon')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".css"):
f = open(self.path[1:])
self.send_response(200)
self.send_header('Content-type', 'text/css')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".js"):
f = open(self.path[1:])
self.send_response(200)
self.send_header('Content-type', 'text/javascript')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
if self.path.endswith(".esp"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("hey, today is the" + str(time.localtime()[7]))
self.wfile.write(" day in the year " + str(time.localtime()[0]))
return
if self.path == '/':
f = open('index.html')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
f.close()
return
return
except IOError:
self.send_error(404, 'File Not Found: %s' % self.path)
def do_POST(self):
global rootnode
try:
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data':
query = cgi.parse_multipart(self.rfile, pdict)
self.send_response(301)
self.end_headers()
upfilecontent = query.get('upfile')
print "filecontent", upfilecontent[0]
self.wfile.write("<HTML>POST OK.<BR><BR>");
self.wfile.write(upfilecontent[0]);
except:
pass
def main():
try:
server = HTTPServer(('', 62146), MyHandler)
print 'started httpserver...'
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
server.socket.close()
if __name__ == '__main__':
main()
|
{
"content_hash": "2d9c2c12ca62d6368ca15731bcce7a76",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 117,
"avg_line_length": 36.96153846153846,
"alnum_prop": 0.4815296566077003,
"repo_name": "lsnow2017/ACDMedia",
"id": "656d231a5ca57c2e37d49abaae3d497dee9179a8",
"size": "3883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30220"
},
{
"name": "HTML",
"bytes": "6767"
},
{
"name": "JavaScript",
"bytes": "302060"
},
{
"name": "Python",
"bytes": "4897"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
from logging import getLogger
from textwrap import dedent
log = getLogger(__name__)
def dals(string):
"""dedent and left-strip"""
return dedent(string).lstrip()
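# Illustrative only (added, not part of the original module): dals() turns an
# indented triple-quoted literal into clean, left-aligned text, e.g.
#   dals("""
#        key: value
#        other: value
#        """)  ->  "key: value\nother: value\n"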
def _get_attr(obj, attr_name, aliases=()):
try:
return getattr(obj, attr_name)
except AttributeError:
for alias in aliases:
try:
return getattr(obj, alias)
except AttributeError:
continue
else:
raise
def find_or_none(key, search_maps, aliases=(), _map_index=0):
"""Return the value of the first key found in the list of search_maps,
otherwise return None.
Examples:
>>> from .collection import AttrDict
>>> d1 = AttrDict({'a': 1, 'b': 2, 'c': 3, 'e': None})
>>> d2 = AttrDict({'b': 5, 'e': 6, 'f': 7})
>>> find_or_none('c', (d1, d2))
3
>>> find_or_none('f', (d1, d2))
7
>>> find_or_none('b', (d1, d2))
2
>>> print(find_or_none('g', (d1, d2)))
None
>>> find_or_none('e', (d1, d2))
6
"""
try:
attr = _get_attr(search_maps[_map_index], key, aliases)
return attr if attr is not None else find_or_none(key, search_maps[1:], aliases)
except AttributeError:
# not found in current map object, so go to next
return find_or_none(key, search_maps, aliases, _map_index+1)
except IndexError:
# ran out of map objects to search
return None
def find_or_raise(key, search_maps, aliases=(), _map_index=0):
try:
attr = _get_attr(search_maps[_map_index], key, aliases)
return attr if attr is not None else find_or_raise(key, search_maps[1:], aliases)
except AttributeError:
# not found in current map object, so go to next
return find_or_raise(key, search_maps, aliases, _map_index+1)
except IndexError:
# ran out of map objects to search
raise AttributeError()
|
{
"content_hash": "16fd96488964979bf2e96be2818d5e1a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 89,
"avg_line_length": 30.742424242424242,
"alnum_prop": 0.5736816165598817,
"repo_name": "zooba/PTVS",
"id": "69d31b8d58bdadbcacd2f3a7f7a90c78ce4119b8",
"size": "2053",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/auxlib/ish.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from pipeline.compressors import CompressorBase
class SlimItCompressor(CompressorBase):
"""
JS compressor based on the Python library slimit
(http://pypi.python.org/pypi/slimit/).
"""
def compress_js(self, js):
from slimit import minify
return minify(js)
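# The short sketch below is added for illustration and is not part of the
# original file; it assumes the optional ``slimit`` package is installed and
# that CompressorBase accepts a ``verbose`` flag, as in django-pipeline's
# compressor base class.
if __name__ == "__main__":
    compressor = SlimItCompressor(verbose=False)
    print(compressor.compress_js("function add(a, b) { return a + b; }"))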
|
{
"content_hash": "17b996e992b9ed01641fa737df7074e0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.6936936936936937,
"repo_name": "hyperoslo/django-pipeline",
"id": "8a9a600637f54e5ba82910162479d3559f96d0ee",
"size": "333",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pipeline/compressors/slimit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1001"
},
{
"name": "CoffeeScript",
"bytes": "52"
},
{
"name": "JavaScript",
"bytes": "140"
},
{
"name": "Python",
"bytes": "80229"
},
{
"name": "Shell",
"bytes": "4529"
}
],
"symlink_target": ""
}
|
from py3oauth2 import message
from py3oauth2.authorizationcodegrant import (
AccessTokenRequest,
AuthorizationRequest,
)
from oidc.idtoken import IDToken as BaseIDToken
__all__ = ['IDToken', 'AuthenticationRequest', 'AccessTokenRequest']
class AuthenticationRequest(AuthorizationRequest):
# OAuth2.0 parameters
response_type = message.Parameter(str, required=True)
scope = message.Parameter(str, required=True)
redirect_uri = message.Parameter(str, required=True)
# OAuth2.0 Multiple Response Type Encoding Practices
response_mode = message.Parameter(str)
# OpenID Connect parameters
nonce = message.Parameter(str, recommended=True)
display = message.Parameter(str)
prompt = message.Parameter(str)
max_age = message.Parameter(int)
ui_locales = message.Parameter(str)
id_token_hint = message.Parameter(str)
login_hint = message.Parameter(str)
acr_values = message.Parameter(str)
class AccessTokenResponse(message.AccessTokenResponse):
id_token = message.Parameter(str, required=True)
class IDToken(BaseIDToken):
at_hash = message.Parameter(str)
class AccessTokenRequest(AccessTokenRequest):
response = AccessTokenResponse
id_token = IDToken
def answer(self, provider, owner):
response = super(AccessTokenRequest, self).answer(provider, owner)
client = provider.store.get_client(self.client_id)
access_token = provider.store.get_access_token(response.access_token)
id_token = self.id_token(response,
provider.get_iss(),
access_token.get_owner().get_sub(),
client.get_id(),
provider.get_id_token_lifetime())
id_token.at_hash = provider.left_hash(client.get_jws_alg(),
response.access_token)
response.id_token =\
provider.encode_token(id_token, client, response.access_token)
return response
|
{
"content_hash": "3c58bc4a11d5d1c3c7e5f4a5e04d0e45",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 34.52542372881356,
"alnum_prop": 0.6607756504663721,
"repo_name": "GehirnInc/python-oidc",
"id": "876d1ff36e2a96c79d84cd1dd23b5c8eb95ebc5b",
"size": "2062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oidc/authorizationcodeflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35074"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Logit'], ['PolyTrend'], ['Seasonal_WeekOfYear'], ['MLP'])
|
{
"content_hash": "75a218f1b1dda3f4beeeb307b3a9cb1e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 85,
"avg_line_length": 39.5,
"alnum_prop": 0.7088607594936709,
"repo_name": "antoinecarme/pyaf",
"id": "54b8634a9572e4f3cc83420502ae840963d614ca",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_Seasonal_WeekOfYear_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from check_link import is_absolute
from domain import get_domain
from check_link import check_link  # TODO: not used
def get_button_str(link, text):
return ' <a href="%s" target="_blank" class="btn btn-outline-secondary btn-sm" role="button">' % link + ' %s' % text + '</a>\n'
def get_button_str_inplace(link, text):
return ' <a href="%s" class="btn btn-outline-secondary btn-sm" role="button">' % link + '%s' % text + '</a>\n'
def get_button_str_all(record):
# this order in this function will determine the order of buttons
button = ''
if 'web' in record:
button += get_button_str(record['web'], 'Project Page')
if 'web_inplace' in record:
button += get_button_str_inplace(record['web_inplace'], 'Project Page')
if 'pdf' in record:
button += get_button_str(record['pdf'], 'Paper')
if 'supp' in record:
button += get_button_str(record['supp'], 'Supplement')
if 'poster' in record:
button += get_button_str(record['poster'], 'Poster')
if 'code' in record:
button += get_button_str(record['code'], 'Code')
if 'executable' in record:
button += get_button_str(record['executable'], 'Executable')
if 'data' in record:
button += get_button_str(record['data'], 'Data')
if 'video' in record:
button += get_button_str(record['video'], 'Video')
if 'tutorial' in record:
button += get_button_str(record['tutorial'], 'Tutorial')
return button
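# Illustrative usage (added, not in the original script): a record with
# hypothetical links yields one button per recognised key, in the order fixed
# by get_button_str_all above.
if __name__ == "__main__":
    record = {
        "pdf": "https://example.org/paper.pdf",      # hypothetical URL
        "code": "https://github.com/example/repo",   # hypothetical URL
    }
    print(get_button_str_all(record))  # Paper button followed by Code button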
|
{
"content_hash": "d2ed5d2270fb888fa01c9ea49147e3f3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 132,
"avg_line_length": 40.5,
"alnum_prop": 0.6659404502541757,
"repo_name": "syncle/syncle.github.io",
"id": "9230a7eb37e0e1541b90aedd8ee3b1f68143a799",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/gen_button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "211028"
},
{
"name": "HTML",
"bytes": "250812"
},
{
"name": "JavaScript",
"bytes": "1648"
},
{
"name": "Python",
"bytes": "29355"
},
{
"name": "Shell",
"bytes": "1109"
},
{
"name": "TeX",
"bytes": "6699"
}
],
"symlink_target": ""
}
|
"""
Example of using Google Maps queries and PyMap3D
https://developers.google.com/places/web-service/search
This requires a Google Cloud API key, and each query costs a couple of US cents.
TODO: it would be nice to query a larger region instead; would OSM be an option?
"""
import functools
from argparse import ArgumentParser
from pathlib import Path
import pandas
import requests
from pymap3d.vincenty import vdist
URL = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?"
@functools.lru_cache()
def get_place_coords(
place_type: str, latitude: float, longitude: float, search_radius_km: int, keyfn: Path
) -> pandas.DataFrame:
"""
Get places using Google Maps Places API
Requires you to have a Google Cloud account with API key.
"""
keyfn = Path(keyfn).expanduser()
key = keyfn.read_text()
stub = URL + f"location={latitude},{longitude}"
stub += f"&radius={search_radius_km * 1000}"
stub += f"&types={place_type}"
stub += f"&key={key}"
r = requests.get(stub)
r.raise_for_status()
place_json = r.json()["results"]
places = pandas.DataFrame(
index=[p["name"] for p in place_json],
columns=["latitude", "longitude", "distance_km", "vicinity"],
)
places["latitude"] = [p["geometry"]["location"]["lat"] for p in place_json]
places["longitude"] = [p["geometry"]["location"]["lng"] for p in place_json]
places["vicinity"] = [p["vicinity"] for p in place_json]
return places
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument(
"place_type",
help="Place type to search: https://developers.google.com/places/supported_types",
)
p.add_argument(
"searchloc", help="initial latituude, longitude to search from", nargs=2, type=float
)
p.add_argument("radius", help="search radius (kilometers)", type=int)
p.add_argument("refloc", help="reference location (lat, lon)", nargs=2, type=float)
p.add_argument("-k", "--keyfn", help="Google Places API key file", default="~/googlemaps.key")
a = p.parse_args()
place_coords = get_place_coords(a.place_type, *a.searchloc, a.radius, a.keyfn)
place_coords["distance_km"] = (
vdist(place_coords["latitude"], place_coords["longitude"], *a.refloc)[0] / 1e3
)
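# Example invocation (hypothetical coordinates, added for illustration):
#   python vdist_poi.py cafe 60.17 24.94 5 60.19 24.95 -k ~/googlemaps.key
# i.e. search for "cafe" within 5 km of (60.17, 24.94) and report each hit's
# Vincenty distance in km from the reference point (60.19, 24.95).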
|
{
"content_hash": "6c7c438c3318c3e1f4115a9d13f0e12b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 98,
"avg_line_length": 30.546666666666667,
"alnum_prop": 0.6551724137931034,
"repo_name": "geospace-code/pymap3d",
"id": "f0755e4798c1ffba4ba50bd907474eab41211e92",
"size": "2313",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Examples/vdist_poi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "MATLAB",
"bytes": "512"
},
{
"name": "Python",
"bytes": "166232"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=128, FREQ='D', seed=0, trendtype="ConstantTrend", cycle_length=7, transform="Quantization", sigma=0.0, exog_count=100, ar_order=0)
|
{
"content_hash": "c54866ef79433b69fdd80dc00d4ad2f8",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 172,
"avg_line_length": 38.857142857142854,
"alnum_prop": 0.7132352941176471,
"repo_name": "antoinecarme/pyaf",
"id": "7ee2f79978e4ac84b24b901d2e52e84ba6416499",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_ConstantTrend/cycle_7/ar_/test_artificial_128_Quantization_ConstantTrend_7__100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import sys
from mixbox.binding_utils import *
class malwareMetaData(GeneratedsSuper):
"""This is the top level element for the xml document. Required
attribute is version. Open issues: 2. Right way to express
commonality in field data so that it can be combined properly 3.
How to handle unicode in urls Change list 08/26/2011 Clean-file
attribute based changes 1. added digitalSignature to objects 2.
added softwarePackage to objects 3. added taggant to objects 4.
added numerous elements to fileObject 11/12/2009 1. adding
documentation across the schema 2. added partner to
OriginTypeEnum 3. made sha1 in fileObject optional 4. added
isDamaged as a propertyType 5. changed property name isNon-
replicating to isNonReplicating 6/11/2009 1. incremented version
2.Rename parents/children in relationship to source/target 3.
Add generic relationship, ‘relatedTo’ 4. Make commonality
element in fieldDataEntry optional 5. Add unknown element to
origintypeenum 6. Remove ipv4 and ipv6 from locationenum 7. Make
id on ip object startaddress-endaddress even if startaddress ==
endaddress. Added IPRange type 8. Add optional firstSeenDate to
fieldDataEntry, for first time entity providing data saw the
object 6/4/2009 1. File - id should be a xs:hexBinary 2. File -
extraHash should be a xs:string 3. Uri – add optional
ipProtocol field, with enumeration of values tcp/udp/icmp etc.
4. Uri – add documentation that protocol in uri needs to be
either from well known list (from iana.org) or ‘unknown’ 5.
Domain - need to fix documentation for domain – example is
wrong 6. registry – remove valuedata – it is in a property
7. ip object – rename to ip, and give it a start address and
end address. Share a single address by making start and end the
same. Id will be address or startaddress-endaddress 8. service
– delete – subsumed by uri with extra data elements in it 9.
classification – remove modifiers (attributes) on category and
put in properties 10. classification – add documentation that
category is companyname:category 11. objectProperty – move
timestamp to be top level instead of on each property and make
it required 12. relationship – make timestamp required 13.
relationship – add doc on runs. removed 'exploits' - it refers
to environment object that no longer exists 14. added comment
field to propertyenum 15. made timeStamp -> timestamp for
consistency 16.incremented version 5/31/2009 1. incremented
version 2. changed url to uri 3. removed environment object and
related enumerations 4. added restriction on uri to not allow a
question mark (?) 5/15/2009 1. incremented version 2. Added
neutral classification type 3. Added numberOfWebsitesHosting and
numberOfWebsitesRedirecting to volume units enumeration 4. added
referrer, operatingSystem, userAgent and browser to properties
5. made classification type attribute required 5/8/2009 1. added
new object type for asn 2. moved domain information to
properties, so that domains info can be timestamped 3. added
properties for geolocation of an ip address 4. added property
for location url for a file 5. added VolumeUnitsEnum and volume
tag in fieldData. This is to allow sharing of actual prevalence
numbers, with various units. 6. Added ipProtocol (tcp/udp) to
service object. Also changed names of expectedProtocol and
actualProtocol to be expectedApplicationProtocol and
actualApplicationProtocol 7. added 'references' surrounding tag
to ref tag in fieldDataEntry and objectProperty, so that can
assign multiple references if required 8. made id on file back
to hexBinary. Use length to figure out what hash it is. 9.
incremented version 10. added properties for httpMethod and
postData 11. added relationship types 'contactedBy' and
'downloadedFrom' 4/17/2009 1. Incremented version 2. Added
unwanted to ClassificationTypeEnum 3. Added text about ids for
files to documentation 4. Removed filename from file object
definition 5. Relaxed requirement on id of file to be an
xs:hexString to be an xs:string to allow e.g. md5:aaaaabbbbccc
as an id. Not enormously happy about that… 6. Made sha256
optional and sha1 required in files 7. Added “open issues”
section in documentation for top level element 8. Category is
now an xs:string; deleted CategoryTypeEnum 9. Added comment to
doc on fieldDataEntry about using standard time periods, but
kept start date and end date 10. Added objectProperties element,
and example illustratingProperties.xml. Currently allowed
properties are filename, filepath, registryValueData and
urlParameterString. There is an optional timestamp on each
property. I allowed objectProperty to have an id, so that it can
be referenced elsewhere, although we might want to re-think
that. 11. Added some better documentation to relationships 12.
Added more documentation throughout The version of the schema.
This is currently fixed to be 1.1. A required identifier for the
document."""
subclass = None
superclass = None
def __init__(self, version=None, id=None, company=None, author=None, comment=None, timestamp=None, objects=None, objectProperties=None, relationships=None, fieldData=None):
self.version = _cast(float, version)
self.id = _cast(None, id)
self.company = company
self.author = author
self.comment = comment
self.timestamp = timestamp
self.objects = objects
self.objectProperties = objectProperties
self.relationships = relationships
self.fieldData = fieldData
def factory(*args_, **kwargs_):
if malwareMetaData.subclass:
return malwareMetaData.subclass(*args_, **kwargs_)
else:
return malwareMetaData(*args_, **kwargs_)
factory = staticmethod(factory)
def get_company(self): return self.company
def set_company(self, company): self.company = company
def get_author(self): return self.author
def set_author(self, author): self.author = author
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def get_objects(self): return self.objects
def set_objects(self, objects): self.objects = objects
def get_objectProperties(self): return self.objectProperties
def set_objectProperties(self, objectProperties): self.objectProperties = objectProperties
def get_relationships(self): return self.relationships
def set_relationships(self, relationships): self.relationships = relationships
def get_fieldData(self): return self.fieldData
def set_fieldData(self, fieldData): self.fieldData = fieldData
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='malwareMetaData', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='malwareMetaData')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='malwareMetaData'):
if self.version is not None and 'version' not in already_processed:
already_processed.append('version')
write(' version="%s"' % self.gds_format_float(self.version, input_name='version'))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='malwareMetaData', fromsubclass_=False):
if self.company is not None:
showIndent(write, level)
write('<%scompany>%s</%scompany>\n' % (namespace_, quote_xml(self.company), namespace_))
if self.author is not None:
showIndent(write, level)
write('<%sauthor>%s</%sauthor>\n' % (namespace_, quote_xml(self.author), namespace_))
if self.comment is not None:
showIndent(write, level)
write('<%scomment>%s</%scomment>\n' % (namespace_, quote_xml(self.comment), namespace_))
if self.timestamp is not None:
showIndent(write, level)
write('<%stimestamp>%s</%stimestamp>\n' % (namespace_, quote_xml(self.timestamp), namespace_))
if self.objects is not None:
self.objects.export(write, level, namespace_, name_='objects')
if self.objectProperties is not None:
self.objectProperties.export(write, level, namespace_, name_='objectProperties')
if self.relationships is not None:
self.relationships.export(write, level, namespace_, name_='relationships')
if self.fieldData is not None:
self.fieldData.export(write, level, namespace_, name_='fieldData')
def hasContent_(self):
if (
self.company is not None or
self.author is not None or
self.comment is not None or
self.timestamp is not None or
self.objects is not None or
self.objectProperties is not None or
self.relationships is not None or
self.fieldData is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('version', node)
if value is not None and 'version' not in already_processed:
already_processed.append('version')
try:
self.version = float(value)
except ValueError as exp:
raise ValueError('Bad float/double attribute (version): %s' % exp)
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'company':
company_ = child_.text
company_ = self.gds_validate_string(company_, node, 'company')
self.company = company_
elif nodeName_ == 'author':
author_ = child_.text
author_ = self.gds_validate_string(author_, node, 'author')
self.author = author_
elif nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'timestamp':
timestamp_ = child_.text
timestamp_ = self.gds_validate_string(timestamp_, node, 'timestamp')
self.timestamp = timestamp_
elif nodeName_ == 'objects':
obj_ = objects.factory()
obj_.build(child_)
self.set_objects(obj_)
elif nodeName_ == 'objectProperties':
obj_ = objectProperties.factory()
obj_.build(child_)
self.set_objectProperties(obj_)
elif nodeName_ == 'relationships':
obj_ = relationships.factory()
obj_.build(child_)
self.set_relationships(obj_)
elif nodeName_ == 'fieldData':
obj_ = fieldData.factory()
obj_.build(child_)
self.set_fieldData(obj_)
# end class malwareMetaData
class objects(GeneratedsSuper):
"""Objects are globally unique files, urls, domain, registry, ipAddress
etc. The data within the object is supporting data for the
globally unique object. For example, files have an id (by
convention the hash, sha256 if available, else weaker ones), and
the data for the file is the hashes, sizes etc. Urls have an id
(the url itself), and data which is simply the url parts broken
out. There are no dates, etc in the objects. These are first
class, global objects."""
subclass = None
superclass = None
def __init__(self, file=None, uri=None, domain=None, registry=None, ip=None, asn=None, entity=None, classification=None, softwarePackage=None, digitalSignature=None, taggant=None):
if file is None:
self.file = []
else:
self.file = file
if uri is None:
self.uri = []
else:
self.uri = uri
if domain is None:
self.domain = []
else:
self.domain = domain
if registry is None:
self.registry = []
else:
self.registry = registry
if ip is None:
self.ip = []
else:
self.ip = ip
if asn is None:
self.asn = []
else:
self.asn = asn
if entity is None:
self.entity = []
else:
self.entity = entity
if classification is None:
self.classification = []
else:
self.classification = classification
if softwarePackage is None:
self.softwarePackage = []
else:
self.softwarePackage = softwarePackage
if digitalSignature is None:
self.digitalSignature = []
else:
self.digitalSignature = digitalSignature
if taggant is None:
self.taggant = []
else:
self.taggant = taggant
def factory(*args_, **kwargs_):
if objects.subclass:
return objects.subclass(*args_, **kwargs_)
else:
return objects(*args_, **kwargs_)
factory = staticmethod(factory)
def get_file(self): return self.file
def set_file(self, file): self.file = file
def add_file(self, value): self.file.append(value)
def insert_file(self, index, value): self.file[index] = value
def get_uri(self): return self.uri
def set_uri(self, uri): self.uri = uri
def add_uri(self, value): self.uri.append(value)
def insert_uri(self, index, value): self.uri[index] = value
def get_domain(self): return self.domain
def set_domain(self, domain): self.domain = domain
def add_domain(self, value): self.domain.append(value)
def insert_domain(self, index, value): self.domain[index] = value
def get_registry(self): return self.registry
def set_registry(self, registry): self.registry = registry
def add_registry(self, value): self.registry.append(value)
def insert_registry(self, index, value): self.registry[index] = value
def get_ip(self): return self.ip
def set_ip(self, ip): self.ip = ip
def add_ip(self, value): self.ip.append(value)
def insert_ip(self, index, value): self.ip[index] = value
def get_asn(self): return self.asn
def set_asn(self, asn): self.asn = asn
def add_asn(self, value): self.asn.append(value)
def insert_asn(self, index, value): self.asn[index] = value
def get_entity(self): return self.entity
def set_entity(self, entity): self.entity = entity
def add_entity(self, value): self.entity.append(value)
def insert_entity(self, index, value): self.entity[index] = value
def get_classification(self): return self.classification
def set_classification(self, classification): self.classification = classification
def add_classification(self, value): self.classification.append(value)
def insert_classification(self, index, value): self.classification[index] = value
def get_softwarePackage(self): return self.softwarePackage
def set_softwarePackage(self, softwarePackage): self.softwarePackage = softwarePackage
def add_softwarePackage(self, value): self.softwarePackage.append(value)
def insert_softwarePackage(self, index, value): self.softwarePackage[index] = value
def get_digitalSignature(self): return self.digitalSignature
def set_digitalSignature(self, digitalSignature): self.digitalSignature = digitalSignature
def add_digitalSignature(self, value): self.digitalSignature.append(value)
def insert_digitalSignature(self, index, value): self.digitalSignature[index] = value
def get_taggant(self): return self.taggant
def set_taggant(self, taggant): self.taggant = taggant
def add_taggant(self, value): self.taggant.append(value)
def insert_taggant(self, index, value): self.taggant[index] = value
def export(self, write, level, namespace_='', name_='objects', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='objects')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='objects'):
pass
def exportChildren(self, write, level, namespace_='', name_='objects', fromsubclass_=False):
for file_ in self.file:
file_.export(write, level, namespace_, name_='file')
for uri_ in self.uri:
uri_.export(write, level, namespace_, name_='uri')
for domain_ in self.domain:
domain_.export(write, level, namespace_, name_='domain')
for registry_ in self.registry:
registry_.export(write, level, namespace_, name_='registry')
for ip_ in self.ip:
ip_.export(write, level, namespace_, name_='ip')
for asn_ in self.asn:
asn_.export(write, level, namespace_, name_='asn')
for entity_ in self.entity:
entity_.export(write, level, namespace_, name_='entity')
for classification_ in self.classification:
classification_.export(write, level, namespace_, name_='classification')
for softwarePackage_ in self.softwarePackage:
softwarePackage_.export(write, level, namespace_, name_='softwarePackage')
for digitalSignature_ in self.digitalSignature:
digitalSignature_.export(write, level, namespace_, name_='digitalSignature')
for taggant_ in self.taggant:
taggant_.export(write, level, namespace_, name_='taggant')
def hasContent_(self):
if (
self.file or
self.uri or
self.domain or
self.registry or
self.ip or
self.asn or
self.entity or
self.classification or
self.softwarePackage or
self.digitalSignature or
self.taggant
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'file':
obj_ = fileObject.factory()
obj_.build(child_)
self.file.append(obj_)
elif nodeName_ == 'uri':
obj_ = uriObject.factory()
obj_.build(child_)
self.uri.append(obj_)
elif nodeName_ == 'domain':
obj_ = domainObject.factory()
obj_.build(child_)
self.domain.append(obj_)
elif nodeName_ == 'registry':
obj_ = registryObject.factory()
obj_.build(child_)
self.registry.append(obj_)
elif nodeName_ == 'ip':
obj_ = IPObject.factory()
obj_.build(child_)
self.ip.append(obj_)
elif nodeName_ == 'asn':
obj_ = ASNObject.factory()
obj_.build(child_)
self.asn.append(obj_)
elif nodeName_ == 'entity':
obj_ = entityObject.factory()
obj_.build(child_)
self.entity.append(obj_)
elif nodeName_ == 'classification':
obj_ = classificationObject.factory()
obj_.build(child_)
self.classification.append(obj_)
elif nodeName_ == 'softwarePackage':
obj_ = softwarePackageObject.factory()
obj_.build(child_)
self.softwarePackage.append(obj_)
elif nodeName_ == 'digitalSignature':
obj_ = digitalSignatureObject.factory()
obj_.build(child_)
self.digitalSignature.append(obj_)
elif nodeName_ == 'taggant':
obj_ = taggantObject.factory()
obj_.build(child_)
self.taggant.append(obj_)
# end class objects
class objectProperties(GeneratedsSuper):
"""Properties of objects that do not make sense as relationships. e.g.
file names, url parameter strings, registry value data."""
subclass = None
superclass = None
def __init__(self, objectProperty=None):
if objectProperty is None:
self.objectProperty = []
else:
self.objectProperty = objectProperty
def factory(*args_, **kwargs_):
if objectProperties.subclass:
return objectProperties.subclass(*args_, **kwargs_)
else:
return objectProperties(*args_, **kwargs_)
factory = staticmethod(factory)
def get_objectProperty(self): return self.objectProperty
def set_objectProperty(self, objectProperty): self.objectProperty = objectProperty
def add_objectProperty(self, value): self.objectProperty.append(value)
def insert_objectProperty(self, index, value): self.objectProperty[index] = value
def export(self, write, level, namespace_='', name_='objectProperties', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='objectProperties')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='objectProperties'):
pass
def exportChildren(self, write, level, namespace_='', name_='objectProperties', fromsubclass_=False):
for objectProperty_ in self.objectProperty:
objectProperty_.export(write, level, namespace_, name_='objectProperty')
def hasContent_(self):
if (
self.objectProperty
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'objectProperty':
obj_ = objectProperty.factory()
obj_.build(child_)
self.objectProperty.append(obj_)
# end class objectProperties
class relationships(GeneratedsSuper):
"""Relationships between objects."""
subclass = None
superclass = None
def __init__(self, relationship=None):
if relationship is None:
self.relationship = []
else:
self.relationship = relationship
def factory(*args_, **kwargs_):
if relationships.subclass:
return relationships.subclass(*args_, **kwargs_)
else:
return relationships(*args_, **kwargs_)
factory = staticmethod(factory)
def get_relationship(self): return self.relationship
def set_relationship(self, relationship): self.relationship = relationship
def add_relationship(self, value): self.relationship.append(value)
def insert_relationship(self, index, value): self.relationship[index] = value
def export(self, write, level, namespace_='', name_='relationships', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='relationships')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='relationships'):
pass
def exportChildren(self, write, level, namespace_='', name_='relationships', fromsubclass_=False):
for relationship_ in self.relationship:
relationship_.export(write, level, namespace_, name_='relationship')
def hasContent_(self):
if (
self.relationship
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'relationship':
obj_ = relationship.factory()
obj_.build(child_)
self.relationship.append(obj_)
# end class relationships
class fieldData(GeneratedsSuper):
"""Prevalence data."""
subclass = None
superclass = None
def __init__(self, fieldDataEntry=None):
if fieldDataEntry is None:
self.fieldDataEntry = []
else:
self.fieldDataEntry = fieldDataEntry
def factory(*args_, **kwargs_):
if fieldData.subclass:
return fieldData.subclass(*args_, **kwargs_)
else:
return fieldData(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fieldDataEntry(self): return self.fieldDataEntry
def set_fieldDataEntry(self, fieldDataEntry): self.fieldDataEntry = fieldDataEntry
def add_fieldDataEntry(self, value): self.fieldDataEntry.append(value)
def insert_fieldDataEntry(self, index, value): self.fieldDataEntry[index] = value
def export(self, write, level, namespace_='', name_='fieldData', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='fieldData')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='fieldData'):
pass
def exportChildren(self, write, level, namespace_='', name_='fieldData', fromsubclass_=False):
for fieldDataEntry_ in self.fieldDataEntry:
fieldDataEntry_.export(write, level, namespace_, name_='fieldDataEntry')
def hasContent_(self):
if (
self.fieldDataEntry
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'fieldDataEntry':
obj_ = fieldDataEntry.factory()
obj_.build(child_)
self.fieldDataEntry.append(obj_)
# end class fieldData
class fileObject(GeneratedsSuper):
"""Object definition for files. The required attribute is the id, which
needs to be globally unique. By convention, the value used is a
hash, the stronger the better. The choice should be: use sha256
if you have it, if not use sha1, if not use md5. Other hashes
and file sizes are recorded in the elements. File names are put
in as properties."""
subclass = None
superclass = None
def __init__(self, id=None, md5=None, sha1=None, sha256=None, sha512=None, size=None, crc32=None, fileType=None, extraHash=None, filename=None, normalizedNativePath=None, filenameWithinInstaller=None, folderWithinInstaller=None, vendor=None, internalName=None, language=None, productName=None, fileVersion=None, productVersion=None, developmentEnvironment=None, checksum=None, architecture=None, buildTimeDateStamp=None, compilerVersion=None, linkerVersion=None, minOSVersionCPE=None, numberOfSections=None, MIMEType=None, requiredPrivilege=None, digitalSignature=None, taggant=None):
self.id = _cast(None, id)
self.md5 = md5
self.sha1 = sha1
self.sha256 = sha256
self.sha512 = sha512
self.size = size
self.crc32 = crc32
if fileType is None:
self.fileType = []
else:
self.fileType = fileType
if extraHash is None:
self.extraHash = []
else:
self.extraHash = extraHash
if filename is None:
self.filename = []
else:
self.filename = filename
if normalizedNativePath is None:
self.normalizedNativePath = []
else:
self.normalizedNativePath = normalizedNativePath
if filenameWithinInstaller is None:
self.filenameWithinInstaller = []
else:
self.filenameWithinInstaller = filenameWithinInstaller
if folderWithinInstaller is None:
self.folderWithinInstaller = []
else:
self.folderWithinInstaller = folderWithinInstaller
self.vendor = vendor
if internalName is None:
self.internalName = []
else:
self.internalName = internalName
if language is None:
self.language = []
else:
self.language = language
self.productName = productName
self.fileVersion = fileVersion
self.productVersion = productVersion
self.developmentEnvironment = developmentEnvironment
self.checksum = checksum
self.architecture = architecture
self.buildTimeDateStamp = buildTimeDateStamp
self.compilerVersion = compilerVersion
self.linkerVersion = linkerVersion
self.minOSVersionCPE = minOSVersionCPE
self.numberOfSections = numberOfSections
self.MIMEType = MIMEType
self.requiredPrivilege = requiredPrivilege
self.digitalSignature = digitalSignature
self.taggant = taggant
def factory(*args_, **kwargs_):
if fileObject.subclass:
return fileObject.subclass(*args_, **kwargs_)
else:
return fileObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_md5(self): return self.md5
def set_md5(self, md5): self.md5 = md5
def get_sha1(self): return self.sha1
def set_sha1(self, sha1): self.sha1 = sha1
def get_sha256(self): return self.sha256
def set_sha256(self, sha256): self.sha256 = sha256
def get_sha512(self): return self.sha512
def set_sha512(self, sha512): self.sha512 = sha512
def get_size(self): return self.size
def set_size(self, size): self.size = size
def get_crc32(self): return self.crc32
def set_crc32(self, crc32): self.crc32 = crc32
def get_fileType(self): return self.fileType
def set_fileType(self, fileType): self.fileType = fileType
def add_fileType(self, value): self.fileType.append(value)
def insert_fileType(self, index, value): self.fileType[index] = value
def get_extraHash(self): return self.extraHash
def set_extraHash(self, extraHash): self.extraHash = extraHash
def add_extraHash(self, value): self.extraHash.append(value)
def insert_extraHash(self, index, value): self.extraHash[index] = value
def get_filename(self): return self.filename
def set_filename(self, filename): self.filename = filename
def add_filename(self, value): self.filename.append(value)
def insert_filename(self, index, value): self.filename[index] = value
def get_normalizedNativePath(self): return self.normalizedNativePath
def set_normalizedNativePath(self, normalizedNativePath): self.normalizedNativePath = normalizedNativePath
def add_normalizedNativePath(self, value): self.normalizedNativePath.append(value)
def insert_normalizedNativePath(self, index, value): self.normalizedNativePath[index] = value
def get_filenameWithinInstaller(self): return self.filenameWithinInstaller
def set_filenameWithinInstaller(self, filenameWithinInstaller): self.filenameWithinInstaller = filenameWithinInstaller
def add_filenameWithinInstaller(self, value): self.filenameWithinInstaller.append(value)
def insert_filenameWithinInstaller(self, index, value): self.filenameWithinInstaller[index] = value
def get_folderWithinInstaller(self): return self.folderWithinInstaller
def set_folderWithinInstaller(self, folderWithinInstaller): self.folderWithinInstaller = folderWithinInstaller
def add_folderWithinInstaller(self, value): self.folderWithinInstaller.append(value)
def insert_folderWithinInstaller(self, index, value): self.folderWithinInstaller[index] = value
def get_vendor(self): return self.vendor
def set_vendor(self, vendor): self.vendor = vendor
def get_internalName(self): return self.internalName
def set_internalName(self, internalName): self.internalName = internalName
def add_internalName(self, value): self.internalName.append(value)
def insert_internalName(self, index, value): self.internalName[index] = value
def get_language(self): return self.language
def set_language(self, language): self.language = language
def add_language(self, value): self.language.append(value)
def insert_language(self, index, value): self.language[index] = value
def get_productName(self): return self.productName
def set_productName(self, productName): self.productName = productName
def get_fileVersion(self): return self.fileVersion
def set_fileVersion(self, fileVersion): self.fileVersion = fileVersion
def get_productVersion(self): return self.productVersion
def set_productVersion(self, productVersion): self.productVersion = productVersion
def get_developmentEnvironment(self): return self.developmentEnvironment
def set_developmentEnvironment(self, developmentEnvironment): self.developmentEnvironment = developmentEnvironment
def get_checksum(self): return self.checksum
def set_checksum(self, checksum): self.checksum = checksum
def get_architecture(self): return self.architecture
def set_architecture(self, architecture): self.architecture = architecture
def get_buildTimeDateStamp(self): return self.buildTimeDateStamp
def set_buildTimeDateStamp(self, buildTimeDateStamp): self.buildTimeDateStamp = buildTimeDateStamp
def get_compilerVersion(self): return self.compilerVersion
def set_compilerVersion(self, compilerVersion): self.compilerVersion = compilerVersion
def get_linkerVersion(self): return self.linkerVersion
def set_linkerVersion(self, linkerVersion): self.linkerVersion = linkerVersion
def get_minOSVersionCPE(self): return self.minOSVersionCPE
def set_minOSVersionCPE(self, minOSVersionCPE): self.minOSVersionCPE = minOSVersionCPE
def get_numberOfSections(self): return self.numberOfSections
def set_numberOfSections(self, numberOfSections): self.numberOfSections = numberOfSections
def get_MIMEType(self): return self.MIMEType
def set_MIMEType(self, MIMEType): self.MIMEType = MIMEType
def get_requiredPrivilege(self): return self.requiredPrivilege
def set_requiredPrivilege(self, requiredPrivilege): self.requiredPrivilege = requiredPrivilege
def get_digitalSignature(self): return self.digitalSignature
def set_digitalSignature(self, digitalSignature): self.digitalSignature = digitalSignature
def get_taggant(self): return self.taggant
def set_taggant(self, taggant): self.taggant = taggant
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='fileObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='fileObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='fileObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='', name_='fileObject', fromsubclass_=False):
if self.md5 is not None:
self.md5.export(write, level, namespace_, name_='md5', )
if self.sha1 is not None:
self.sha1.export(write, level, namespace_, name_='sha1')
if self.sha256 is not None:
self.sha256.export(write, level, namespace_, name_='sha256')
if self.sha512 is not None:
self.sha512.export(write, level, namespace_, name_='sha512')
if self.size is not None:
showIndent(write, level)
write('<%ssize>%s</%ssize>\n' % (namespace_, self.gds_format_integer(self.size, input_name='size'), namespace_))
if self.crc32 is not None:
showIndent(write, level)
write('<%scrc32>%s</%scrc32>\n' % (namespace_, quote_xml(self.crc32), namespace_))
for fileType_ in self.fileType:
showIndent(write, level)
write('<%sfileType>%s</%sfileType>\n' % (namespace_, quote_xml(fileType_), namespace_))
for extraHash_ in self.extraHash:
extraHash_.export(write, level, namespace_, name_='extraHash')
for filename_ in self.filename:
showIndent(write, level)
write('<%sfilename>%s</%sfilename>\n' % (namespace_, quote_xml(filename_), namespace_))
for normalizedNativePath_ in self.normalizedNativePath:
showIndent(write, level)
write('<%snormalizedNativePath>%s</%snormalizedNativePath>\n' % (namespace_, quote_xml(normalizedNativePath_), namespace_))
for filenameWithinInstaller_ in self.filenameWithinInstaller:
showIndent(write, level)
write('<%sfilenameWithinInstaller>%s</%sfilenameWithinInstaller>\n' % (namespace_, quote_xml(filenameWithinInstaller_), namespace_))
for folderWithinInstaller_ in self.folderWithinInstaller:
showIndent(write, level)
write('<%sfolderWithinInstaller>%s</%sfolderWithinInstaller>\n' % (namespace_, quote_xml(folderWithinInstaller_), namespace_))
if self.vendor is not None:
showIndent(write, level)
write('<%svendor>%s</%svendor>\n' % (namespace_, quote_xml(self.vendor), namespace_))
for internalName_ in self.internalName:
showIndent(write, level)
write('<%sinternalName>%s</%sinternalName>\n' % (namespace_, quote_xml(internalName_), namespace_))
for language_ in self.language:
showIndent(write, level)
write('<%slanguage>%s</%slanguage>\n' % (namespace_, quote_xml(language_), namespace_))
if self.productName is not None:
showIndent(write, level)
write('<%sproductName>%s</%sproductName>\n' % (namespace_, quote_xml(self.productName), namespace_))
if self.fileVersion is not None:
showIndent(write, level)
write('<%sfileVersion>%s</%sfileVersion>\n' % (namespace_, quote_xml(self.fileVersion), namespace_))
if self.productVersion is not None:
showIndent(write, level)
write('<%sproductVersion>%s</%sproductVersion>\n' % (namespace_, quote_xml(self.productVersion), namespace_))
if self.developmentEnvironment is not None:
showIndent(write, level)
write('<%sdevelopmentEnvironment>%s</%sdevelopmentEnvironment>\n' % (namespace_, quote_xml(self.developmentEnvironment), namespace_))
if self.checksum is not None:
self.checksum.export(write, level, namespace_, name_='checksum')
if self.architecture is not None:
showIndent(write, level)
write('<%sarchitecture>%s</%sarchitecture>\n' % (namespace_, quote_xml(self.architecture), namespace_))
if self.buildTimeDateStamp is not None:
showIndent(write, level)
write('<%sbuildTimeDateStamp>%s</%sbuildTimeDateStamp>\n' % (namespace_, quote_xml(self.buildTimeDateStamp), namespace_))
if self.compilerVersion is not None:
showIndent(write, level)
write('<%scompilerVersion>%s</%scompilerVersion>\n' % (namespace_, quote_xml(self.compilerVersion), namespace_))
if self.linkerVersion is not None:
showIndent(write, level)
write('<%slinkerVersion>%s</%slinkerVersion>\n' % (namespace_, self.gds_format_float(self.linkerVersion, input_name='linkerVersion'), namespace_))
if self.minOSVersionCPE is not None:
showIndent(write, level)
write('<%sminOSVersionCPE>%s</%sminOSVersionCPE>\n' % (namespace_, quote_xml(self.minOSVersionCPE), namespace_))
if self.numberOfSections is not None:
showIndent(write, level)
write('<%snumberOfSections>%s</%snumberOfSections>\n' % (namespace_, self.gds_format_integer(self.numberOfSections, input_name='numberOfSections'), namespace_))
if self.MIMEType is not None:
showIndent(write, level)
write('<%sMIMEType>%s</%sMIMEType>\n' % (namespace_, quote_xml(self.MIMEType), namespace_))
if self.requiredPrivilege is not None:
showIndent(write, level)
write('<%srequiredPrivilege>%s</%srequiredPrivilege>\n' % (namespace_, quote_xml(self.requiredPrivilege), namespace_))
if self.digitalSignature is not None:
self.digitalSignature.export(write, level, namespace_, name_='digitalSignature')
if self.taggant is not None:
self.taggant.export(write, level, namespace_, name_='taggant')
def hasContent_(self):
if (
self.md5 is not None or
self.sha1 is not None or
self.sha256 is not None or
self.sha512 is not None or
self.size is not None or
self.crc32 is not None or
self.fileType or
self.extraHash or
self.filename or
self.normalizedNativePath or
self.filenameWithinInstaller or
self.folderWithinInstaller or
self.vendor is not None or
self.internalName or
self.language or
self.productName is not None or
self.fileVersion is not None or
self.productVersion is not None or
self.developmentEnvironment is not None or
self.checksum is not None or
self.architecture is not None or
self.buildTimeDateStamp is not None or
self.compilerVersion is not None or
self.linkerVersion is not None or
self.minOSVersionCPE is not None or
self.numberOfSections is not None or
self.MIMEType is not None or
self.requiredPrivilege is not None or
self.digitalSignature is not None or
self.taggant is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'md5':
obj_ = xs_hexBinary.factory()
obj_.build(child_)
self.set_md5(obj_)
elif nodeName_ == 'sha1':
obj_ = xs_hexBinary.factory()
obj_.build(child_)
self.set_sha1(obj_)
elif nodeName_ == 'sha256':
obj_ = xs_hexBinary.factory()
obj_.build(child_)
self.set_sha256(obj_)
elif nodeName_ == 'sha512':
obj_ = xs_hexBinary.factory()
obj_.build(child_)
self.set_sha512(obj_)
elif nodeName_ == 'size':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'size')
self.size = ival_
elif nodeName_ == 'crc32':
crc32_ = child_.text
crc32_ = self.gds_validate_string(crc32_, node, 'crc32')
self.crc32 = crc32_
elif nodeName_ == 'fileType':
fileType_ = child_.text
fileType_ = self.gds_validate_string(fileType_, node, 'fileType')
self.fileType.append(fileType_)
elif nodeName_ == 'extraHash':
obj_ = extraHash.factory()
obj_.build(child_)
self.extraHash.append(obj_)
elif nodeName_ == 'filename':
filename_ = child_.text
filename_ = self.gds_validate_string(filename_, node, 'filename')
self.filename.append(filename_)
elif nodeName_ == 'normalizedNativePath':
normalizedNativePath_ = child_.text
normalizedNativePath_ = self.gds_validate_string(normalizedNativePath_, node, 'normalizedNativePath')
self.normalizedNativePath.append(normalizedNativePath_)
elif nodeName_ == 'filenameWithinInstaller':
filenameWithinInstaller_ = child_.text
filenameWithinInstaller_ = self.gds_validate_string(filenameWithinInstaller_, node, 'filenameWithinInstaller')
self.filenameWithinInstaller.append(filenameWithinInstaller_)
elif nodeName_ == 'folderWithinInstaller':
folderWithinInstaller_ = child_.text
folderWithinInstaller_ = self.gds_validate_string(folderWithinInstaller_, node, 'folderWithinInstaller')
self.folderWithinInstaller.append(folderWithinInstaller_)
elif nodeName_ == 'vendor':
vendor_ = child_.text
vendor_ = self.gds_validate_string(vendor_, node, 'vendor')
self.vendor = vendor_
elif nodeName_ == 'internalName':
internalName_ = child_.text
internalName_ = self.gds_validate_string(internalName_, node, 'internalName')
self.internalName.append(internalName_)
elif nodeName_ == 'language':
language_ = child_.text
language_ = self.gds_validate_string(language_, node, 'language')
self.language.append(language_)
elif nodeName_ == 'productName':
productName_ = child_.text
productName_ = self.gds_validate_string(productName_, node, 'productName')
self.productName = productName_
elif nodeName_ == 'fileVersion':
fileVersion_ = child_.text
fileVersion_ = self.gds_validate_string(fileVersion_, node, 'fileVersion')
self.fileVersion = fileVersion_
elif nodeName_ == 'productVersion':
productVersion_ = child_.text
productVersion_ = self.gds_validate_string(productVersion_, node, 'productVersion')
self.productVersion = productVersion_
elif nodeName_ == 'developmentEnvironment':
developmentEnvironment_ = child_.text
developmentEnvironment_ = self.gds_validate_string(developmentEnvironment_, node, 'developmentEnvironment')
self.developmentEnvironment = developmentEnvironment_
elif nodeName_ == 'checksum':
obj_ = xs_hexBinary.factory()
obj_.build(child_)
self.set_checksum(obj_)
elif nodeName_ == 'architecture':
architecture_ = child_.text
architecture_ = self.gds_validate_string(architecture_, node, 'architecture')
self.architecture = architecture_
elif nodeName_ == 'buildTimeDateStamp':
buildTimeDateStamp_ = child_.text
buildTimeDateStamp_ = self.gds_validate_string(buildTimeDateStamp_, node, 'buildTimeDateStamp')
self.buildTimeDateStamp = buildTimeDateStamp_
elif nodeName_ == 'compilerVersion':
compilerVersion_ = child_.text
compilerVersion_ = self.gds_validate_string(compilerVersion_, node, 'compilerVersion')
self.compilerVersion = compilerVersion_
elif nodeName_ == 'linkerVersion':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'linkerVersion')
self.linkerVersion = fval_
elif nodeName_ == 'minOSVersionCPE':
minOSVersionCPE_ = child_.text
minOSVersionCPE_ = self.gds_validate_string(minOSVersionCPE_, node, 'minOSVersionCPE')
self.minOSVersionCPE = minOSVersionCPE_
elif nodeName_ == 'numberOfSections':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'numberOfSections')
self.numberOfSections = ival_
elif nodeName_ == 'MIMEType':
MIMEType_ = child_.text
MIMEType_ = self.gds_validate_string(MIMEType_, node, 'MIMEType')
self.MIMEType = MIMEType_
elif nodeName_ == 'requiredPrivilege':
requiredPrivilege_ = child_.text
requiredPrivilege_ = self.gds_validate_string(requiredPrivilege_, node, 'requiredPrivilege')
self.requiredPrivilege = requiredPrivilege_
elif nodeName_ == 'digitalSignature':
obj_ = digitalSignatureObject.factory()
obj_.build(child_)
self.set_digitalSignature(obj_)
elif nodeName_ == 'taggant':
obj_ = taggantObject.factory()
obj_.build(child_)
self.set_taggant(obj_)
# end class fileObject
class extraHash(GeneratedsSuper):
"""Element for inserting fuzzy hashes for example pehash, ssdeep. These
are put in with this element, with a required attribute 'type'
used to hold the type of hash."""
subclass = None
superclass = None
def __init__(self, type_=None, valueOf_=None):
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if extraHash.subclass:
return extraHash.subclass(*args_, **kwargs_)
else:
return extraHash(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='extraHash', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='extraHash')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='extraHash'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_)))
def exportChildren(self, write, level, namespace_='', name_='extraHash', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class extraHash
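# Illustrative usage sketch (not part of the generated bindings): building an
# extraHash element by hand and serializing it, per the docstring above. The
# hash type and value below are made-up placeholders, not real data.
def _example_extraHash():
    import sys
    eh = extraHash.factory(type_='ssdeep',
                           valueOf_='3:AXGBicFlgVNhBGcL6wCrFQEv:AXGHsNhxLsr2C')
    eh.export(sys.stdout.write, 0)  # emits e.g. <extraHash type="ssdeep">...</extraHash>
    return eh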
class registryObject(GeneratedsSuper):
"""Registry object. The required attribute is 'id', which is taken to
be key\\valueName. Keys end in a \, value names start with a \,
so you have e.g. key =
hklm\software\microsoft\currentversion\windows\run\ value =\foo
making the id
hklm\software\microsoft\currentversion\windows\run\\foo"""
subclass = None
superclass = None
def __init__(self, id=None, key=None, valueName=None):
self.id = _cast(None, id)
self.key = key
self.valueName = valueName
def factory(*args_, **kwargs_):
if registryObject.subclass:
return registryObject.subclass(*args_, **kwargs_)
else:
return registryObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_key(self): return self.key
def set_key(self, key): self.key = key
def get_valueName(self): return self.valueName
def set_valueName(self, valueName): self.valueName = valueName
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='registryObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='registryObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='registryObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='registryObject', fromsubclass_=False):
if self.key is not None:
showIndent(write, level)
write('<%skey>%s</%skey>\n' % (namespace_, quote_xml(self.key), namespace_))
if self.valueName is not None:
showIndent(write, level)
write('<%svalueName>%s</%svalueName>\n' % (namespace_, quote_xml(self.valueName), namespace_))
def hasContent_(self):
if (
self.key is not None or
self.valueName is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'key':
key_ = child_.text
key_ = self.gds_validate_string(key_, node, 'key')
self.key = key_
elif nodeName_ == 'valueName':
valueName_ = child_.text
valueName_ = self.gds_validate_string(valueName_, node, 'valueName')
self.valueName = valueName_
# end class registryObject
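# Illustrative usage sketch (not part of the generated bindings): the id is
# composed as key + valueName, following the key\valueName convention in the
# registryObject docstring. The concrete key and value name are placeholders.
def _example_registryObject():
    import sys
    key = 'hklm\\software\\microsoft\\currentversion\\windows\\run\\'
    value_name = '\\foo'
    reg = registryObject.factory(id=key + value_name, key=key, valueName=value_name)
    reg.export(sys.stdout.write, 0)
    return reg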
class entityObject(GeneratedsSuper):
"""Entity Object. This is used to record groups, companies etc., and
departments within organizations. The globally unique id
(attribute) should be constructed from the company and
department name, e.g. "Company name:Department name",
"Mcafee:AVERT labs", or "Russian Business Network"."""
subclass = None
superclass = None
def __init__(self, id=None, name=None):
self.id = _cast(None, id)
self.name = name
def factory(*args_, **kwargs_):
if entityObject.subclass:
return entityObject.subclass(*args_, **kwargs_)
else:
return entityObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='entityObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='entityObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='entityObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='entityObject', fromsubclass_=False):
if self.name is not None:
showIndent(write, level)
write('<%sname>%s</%sname>\n' % (namespace_, quote_xml(self.name), namespace_))
def hasContent_(self):
if (
self.name is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
# end class entityObject
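# Illustrative usage sketch (not part of the generated bindings): the globally
# unique id is built as "Company name:Department name", as described in the
# entityObject docstring. The names used here are placeholders.
def _example_entityObject():
    import sys
    ent = entityObject.factory(id='Mcafee:AVERT labs', name='AVERT labs')
    ent.export(sys.stdout.write, 0)
    return ent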
class uriObject(GeneratedsSuper):
"""Uri object. Only required element is uri string itself. There are
elements for each of the broken out elements. The protocol
should be take from the list at http://www.iana.org/assignments
/port-numbers, or if not in that list have the value 'unknown'.
The ipProtocol should be taken from the list
http://www.iana.org/assignments/protocol-numbers/. The elements
correspond to the usual breakdown of a uri into its component
domain, hostname, path, port etc, as described at
http://en.wikipedia.org/wiki/Uniform_Resource_Locator."""
subclass = None
superclass = None
def __init__(self, id=None, uriString=None, protocol=None, hostname=None, domain=None, port=None, path=None, ipProtocol=None):
self.id = _cast(None, id)
self.uriString = uriString
self.protocol = protocol
self.hostname = hostname
self.domain = domain
self.port = port
self.path = path
self.ipProtocol = ipProtocol
def factory(*args_, **kwargs_):
if uriObject.subclass:
return uriObject.subclass(*args_, **kwargs_)
else:
return uriObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_uriString(self): return self.uriString
def set_uriString(self, uriString): self.uriString = uriString
def validate_NoQuestionMark(self, value):
# Validate type NoQuestionMark, a restriction on xs:string.
pass
def get_protocol(self): return self.protocol
def set_protocol(self, protocol): self.protocol = protocol
def get_hostname(self): return self.hostname
def set_hostname(self, hostname): self.hostname = hostname
def get_domain(self): return self.domain
def set_domain(self, domain): self.domain = domain
def get_port(self): return self.port
def set_port(self, port): self.port = port
def get_path(self): return self.path
def set_path(self, path): self.path = path
def get_ipProtocol(self): return self.ipProtocol
def set_ipProtocol(self, ipProtocol): self.ipProtocol = ipProtocol
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='uriObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='uriObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='uriObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='', name_='uriObject', fromsubclass_=False):
if self.uriString is not None:
showIndent(write, level)
write('<%suriString>%s</%suriString>\n' % (namespace_, quote_xml(self.uriString), namespace_))
if self.protocol is not None:
showIndent(write, level)
write('<%sprotocol>%s</%sprotocol>\n' % (namespace_, quote_xml(self.protocol), namespace_))
if self.hostname is not None:
showIndent(write, level)
write('<%shostname>%s</%shostname>\n' % (namespace_, quote_xml(self.hostname), namespace_))
if self.domain is not None:
showIndent(write, level)
write('<%sdomain>%s</%sdomain>\n' % (namespace_, quote_xml(self.domain), namespace_))
if self.port is not None:
showIndent(write, level)
write('<%sport>%s</%sport>\n' % (namespace_, self.gds_format_integer(self.port, input_name='port'), namespace_))
if self.path is not None:
showIndent(write, level)
write('<%spath>%s</%spath>\n' % (namespace_, quote_xml(self.path), namespace_))
if self.ipProtocol is not None:
showIndent(write, level)
write('<%sipProtocol>%s</%sipProtocol>\n' % (namespace_, quote_xml(self.ipProtocol), namespace_))
def hasContent_(self):
if (
self.uriString is not None or
self.protocol is not None or
self.hostname is not None or
self.domain is not None or
self.port is not None or
self.path is not None or
self.ipProtocol is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
self.validate_NoQuestionMark(self.id) # validate type NoQuestionMark
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'uriString':
uriString_ = child_.text
uriString_ = self.gds_validate_string(uriString_, node, 'uriString')
self.uriString = uriString_
self.validate_NoQuestionMark(self.uriString) # validate type NoQuestionMark
elif nodeName_ == 'protocol':
protocol_ = child_.text
protocol_ = self.gds_validate_string(protocol_, node, 'protocol')
self.protocol = protocol_
elif nodeName_ == 'hostname':
hostname_ = child_.text
hostname_ = self.gds_validate_string(hostname_, node, 'hostname')
self.hostname = hostname_
elif nodeName_ == 'domain':
domain_ = child_.text
domain_ = self.gds_validate_string(domain_, node, 'domain')
self.domain = domain_
elif nodeName_ == 'port':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'port')
self.port = ival_
elif nodeName_ == 'path':
path_ = child_.text
path_ = self.gds_validate_string(path_, node, 'path')
self.path = path_
elif nodeName_ == 'ipProtocol':
ipProtocol_ = child_.text
ipProtocol_ = self.gds_validate_string(ipProtocol_, node, 'ipProtocol')
self.ipProtocol = ipProtocol_
# end class uriObject
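# Illustrative usage sketch (not part of the generated bindings): a URL broken
# down into the component elements described in the uriObject docstring. The
# URL, protocol and port values are placeholders.
def _example_uriObject():
    import sys
    uri = uriObject.factory(
        id='http://www.example.com/downloads/payload.exe',
        uriString='http://www.example.com/downloads/payload.exe',
        protocol='http',
        hostname='www.example.com',
        domain='example.com',
        port=80,
        path='/downloads/payload.exe',
        ipProtocol='tcp')
    uri.export(sys.stdout.write, 0)
    return uri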
class IPObject(GeneratedsSuper):
"""IP object. Used to hold ipv4, ipv6 ip addresses and address ranges.
The globally unique id is 'startAddress-endAddress'. There are
two required elements, startAddress and endAddress, make these
    the same if you are specifying a single address. Thus for an ip
    range the id would be e.g. 213.23.45.7-213.23.45.19. For a single
    ip the id would be e.g. 12.34.56.1-12.34.56.1"""
subclass = None
superclass = None
def __init__(self, id=None, startAddress=None, endAddress=None):
self.id = _cast(None, id)
self.startAddress = startAddress
self.endAddress = endAddress
def factory(*args_, **kwargs_):
if IPObject.subclass:
return IPObject.subclass(*args_, **kwargs_)
else:
return IPObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_id(self): return self.id
def set_id(self, id): self.id = id
def validate_IPRange(self, value):
# Validate type IPRange, a restriction on xs:string.
pass
def export(self, write, level, namespace_='', name_='IPObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='IPObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='IPObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='', name_='IPObject', fromsubclass_=False):
if self.startAddress is not None:
self.startAddress.export(write, level, namespace_, name_='startAddress', )
if self.endAddress is not None:
self.endAddress.export(write, level, namespace_, name_='endAddress', )
def hasContent_(self):
if (
self.startAddress is not None or
self.endAddress is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
self.validate_IPRange(self.id) # validate type IPRange
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'startAddress':
obj_ = IPAddress.factory()
obj_.build(child_)
self.set_startAddress(obj_)
elif nodeName_ == 'endAddress':
obj_ = IPAddress.factory()
obj_.build(child_)
self.set_endAddress(obj_)
# end class IPObject
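# Illustrative usage sketch (not part of the generated bindings): a single
# address, so startAddress == endAddress and the id is
# 'startAddress-endAddress' as the IPObject docstring describes. The address
# is a placeholder. (IPAddress is defined just below; the reference resolves
# when the function is called.)
def _example_IPObject():
    import sys
    addr = IPAddress.factory(type_='ipv4', valueOf_='12.34.56.1')
    ip = IPObject.factory(id='12.34.56.1-12.34.56.1',
                          startAddress=addr, endAddress=addr)
    ip.export(sys.stdout.write, 0)
    return ip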
class IPAddress(GeneratedsSuper):
"""ip address - string for the actual address and attribute either
ipv4, ipv6."""
subclass = None
superclass = None
def __init__(self, type_=None, valueOf_=None):
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if IPAddress.subclass:
return IPAddress.subclass(*args_, **kwargs_)
else:
return IPAddress(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_IPTypeEnum(self, value):
# Validate type IPTypeEnum, a restriction on xs:string.
pass
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='IPAddress', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='IPAddress')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='IPAddress'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, write, level, namespace_='', name_='IPAddress', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
self.validate_IPTypeEnum(self.type_) # validate type IPTypeEnum
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class IPAddress
class domainObject(GeneratedsSuper):
"""Domain object, used to hold internet domains, e.g.yahoo.com. The
globally unique identifier (id attribute) is the domain itself.
whois information on domain is recorded using object properties."""
subclass = None
superclass = None
def __init__(self, id=None, domain=None):
self.id = _cast(None, id)
self.domain = domain
def factory(*args_, **kwargs_):
if domainObject.subclass:
return domainObject.subclass(*args_, **kwargs_)
else:
return domainObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_domain(self): return self.domain
def set_domain(self, domain): self.domain = domain
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='domainObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='domainObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='domainObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='domainObject', fromsubclass_=False):
if self.domain is not None:
showIndent(write, level)
write('<%sdomain>%s</%sdomain>\n' % (namespace_, quote_xml(self.domain), namespace_))
def hasContent_(self):
if (
self.domain is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'domain':
domain_ = child_.text
domain_ = self.gds_validate_string(domain_, node, 'domain')
self.domain = domain_
# end class domainObject
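# Illustrative usage sketch (not part of the generated bindings): the id
# attribute is simply the domain itself, per the domainObject docstring.
# 'example.com' is a placeholder.
def _example_domainObject():
    import sys
    dom = domainObject.factory(id='example.com', domain='example.com')
    dom.export(sys.stdout.write, 0)
    return dom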
class ASNObject(GeneratedsSuper):
"""Object used to hold information on Autonomous System Numbers. An
autonomous system (AS) is a collection of connected Internet
Protocol (IP) routing prefixes under the control of one or more
network operators that presents a common, clearly defined
routing policy to the Internet. The id is the number, written as
an integer for both 16 and 32 bit numbers."""
subclass = None
superclass = None
def __init__(self, id=None, as_number=None):
self.id = _cast(int, id)
self.as_number = as_number
def factory(*args_, **kwargs_):
if ASNObject.subclass:
return ASNObject.subclass(*args_, **kwargs_)
else:
return ASNObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_as_number(self): return self.as_number
def set_as_number(self, as_number): self.as_number = as_number
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='ASNObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='ASNObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='ASNObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
def exportChildren(self, write, level, namespace_='', name_='ASNObject', fromsubclass_=False):
if self.as_number is not None:
showIndent(write, level)
write('<%sas-number>%s</%sas-number>\n' % (namespace_, self.gds_format_integer(self.as_number, input_name='as-number'), namespace_))
def hasContent_(self):
if (
self.as_number is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
try:
self.id = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'as-number':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'as_number')
self.as_number = ival_
# end class ASNObject
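# Illustrative usage sketch (not part of the generated bindings): the id
# attribute and the as-number child are the same integer AS number, covering
# both 16 and 32 bit ASNs. 64512 (a private-use ASN) is used as a placeholder.
def _example_ASNObject():
    import sys
    asn = ASNObject.factory(id=64512, as_number=64512)
    asn.export(sys.stdout.write, 0)
    return asn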
class classificationObject(GeneratedsSuper):
"""Classification object, used to hold names or classifications of
objects. The most common use case for this is detection names
for files from av scanners. However, this object could be used
for general classification. The globally unique id (attribute)
should be created from "Company name:internal classification
name", e.g. "Mcafee:Generic.DX". The other required attribute is
the type of classification, e.g. clean, dirty, unknown. There
    are elements to capture the category of the classification. The
    category should be entered in the same way as the classification
    name, e.g. company name:category name, e.g. Mcafee:Trojan."""
subclass = None
superclass = None
def __init__(self, type_=None, id=None, classificationName=None, companyName=None, category=None, classificationDetails=None):
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.classificationName = classificationName
self.companyName = companyName
self.category = category
self.classificationDetails = classificationDetails
def factory(*args_, **kwargs_):
if classificationObject.subclass:
return classificationObject.subclass(*args_, **kwargs_)
else:
return classificationObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_classificationName(self): return self.classificationName
def set_classificationName(self, classificationName): self.classificationName = classificationName
def get_companyName(self): return self.companyName
def set_companyName(self, companyName): self.companyName = companyName
def get_category(self): return self.category
def set_category(self, category): self.category = category
def get_classificationDetails(self): return self.classificationDetails
def set_classificationDetails(self, classificationDetails): self.classificationDetails = classificationDetails
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_ClassificationTypeEnum(self, value):
# Validate type ClassificationTypeEnum, a restriction on xs:string.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='classificationObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='classificationObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='classificationObject'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='classificationObject', fromsubclass_=False):
if self.classificationName is not None:
showIndent(write, level)
write('<%sclassificationName>%s</%sclassificationName>\n' % ('mmdef:', quote_xml(self.classificationName), 'mmdef:'))
if self.companyName is not None:
showIndent(write, level)
write('<%scompanyName>%s</%scompanyName>\n' % ('mmdef:', quote_xml(self.companyName), 'mmdef:'))
if self.category is not None:
showIndent(write, level)
write('<%scategory>%s</%scategory>\n' % ('mmdef:', quote_xml(self.category), 'mmdef:'))
if self.classificationDetails is not None:
self.classificationDetails.export(write, level, namespace_, name_='classificationDetails')
def hasContent_(self):
if (
self.classificationName is not None or
self.companyName is not None or
self.category is not None or
self.classificationDetails is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
self.validate_ClassificationTypeEnum(self.type_) # validate type ClassificationTypeEnum
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'classificationName':
classificationName_ = child_.text
classificationName_ = self.gds_validate_string(classificationName_, node, 'classificationName')
self.classificationName = classificationName_
elif nodeName_ == 'companyName':
companyName_ = child_.text
companyName_ = self.gds_validate_string(companyName_, node, 'companyName')
self.companyName = companyName_
elif nodeName_ == 'category':
category_ = child_.text
category_ = self.gds_validate_string(category_, node, 'category')
self.category = category_
elif nodeName_ == 'classificationDetails':
obj_ = classificationDetails.factory()
obj_.build(child_)
self.set_classificationDetails(obj_)
# end class classificationObject
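# Illustrative usage sketch (not part of the generated bindings): the id
# follows the "Company name:classification name" convention and the category
# the "Company name:category name" convention from the docstring. The vendor
# and detection names below are placeholders taken from the docstring example.
def _example_classificationObject():
    import sys
    cls = classificationObject.factory(
        id='Mcafee:Generic.DX',
        type_='dirty',
        classificationName='Generic.DX',
        companyName='Mcafee',
        category='Mcafee:Trojan')
    cls.export(sys.stdout.write, 0)
    return cls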
class classificationDetails(GeneratedsSuper):
"""Details of the classification, giving product details, particularly
useful for anti-virus scanner detections."""
subclass = None
superclass = None
def __init__(self, definitionVersion=None, detectionAddedTimeStamp=None, detectionShippedTimeStamp=None, product=None, productVersion=None):
self.definitionVersion = definitionVersion
self.detectionAddedTimeStamp = detectionAddedTimeStamp
self.detectionShippedTimeStamp = detectionShippedTimeStamp
self.product = product
self.productVersion = productVersion
def factory(*args_, **kwargs_):
if classificationDetails.subclass:
return classificationDetails.subclass(*args_, **kwargs_)
else:
return classificationDetails(*args_, **kwargs_)
factory = staticmethod(factory)
def get_definitionVersion(self): return self.definitionVersion
def set_definitionVersion(self, definitionVersion): self.definitionVersion = definitionVersion
def get_detectionAddedTimeStamp(self): return self.detectionAddedTimeStamp
def set_detectionAddedTimeStamp(self, detectionAddedTimeStamp): self.detectionAddedTimeStamp = detectionAddedTimeStamp
def get_detectionShippedTimeStamp(self): return self.detectionShippedTimeStamp
def set_detectionShippedTimeStamp(self, detectionShippedTimeStamp): self.detectionShippedTimeStamp = detectionShippedTimeStamp
def get_product(self): return self.product
def set_product(self, product): self.product = product
def get_productVersion(self): return self.productVersion
def set_productVersion(self, productVersion): self.productVersion = productVersion
def export(self, write, level, namespace_='', name_='classificationDetails', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='classificationDetails')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='classificationDetails'):
pass
def exportChildren(self, write, level, namespace_='', name_='classificationDetails', fromsubclass_=False):
if self.definitionVersion is not None:
showIndent(write, level)
write('<%sdefinitionVersion>%s</%sdefinitionVersion>\n' % (namespace_, quote_xml(self.definitionVersion), namespace_))
if self.detectionAddedTimeStamp is not None:
showIndent(write, level)
write('<%sdetectionAddedTimeStamp>%s</%sdetectionAddedTimeStamp>\n' % (namespace_, quote_xml(self.detectionAddedTimeStamp), namespace_))
if self.detectionShippedTimeStamp is not None:
showIndent(write, level)
write('<%sdetectionShippedTimeStamp>%s</%sdetectionShippedTimeStamp>\n' % (namespace_, quote_xml(self.detectionShippedTimeStamp), namespace_))
if self.product is not None:
showIndent(write, level)
write('<%sproduct>%s</%sproduct>\n' % (namespace_, quote_xml(self.product), namespace_))
if self.productVersion is not None:
showIndent(write, level)
write('<%sproductVersion>%s</%sproductVersion>\n' % (namespace_, quote_xml(self.productVersion), namespace_))
def hasContent_(self):
if (
self.definitionVersion is not None or
self.detectionAddedTimeStamp is not None or
self.detectionShippedTimeStamp is not None or
self.product is not None or
self.productVersion is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'definitionVersion':
definitionVersion_ = child_.text
definitionVersion_ = self.gds_validate_string(definitionVersion_, node, 'definitionVersion')
self.definitionVersion = definitionVersion_
elif nodeName_ == 'detectionAddedTimeStamp':
detectionAddedTimeStamp_ = child_.text
detectionAddedTimeStamp_ = self.gds_validate_string(detectionAddedTimeStamp_, node, 'detectionAddedTimeStamp')
self.detectionAddedTimeStamp = detectionAddedTimeStamp_
elif nodeName_ == 'detectionShippedTimeStamp':
detectionShippedTimeStamp_ = child_.text
detectionShippedTimeStamp_ = self.gds_validate_string(detectionShippedTimeStamp_, node, 'detectionShippedTimeStamp')
self.detectionShippedTimeStamp = detectionShippedTimeStamp_
elif nodeName_ == 'product':
product_ = child_.text
product_ = self.gds_validate_string(product_, node, 'product')
self.product = product_
elif nodeName_ == 'productVersion':
productVersion_ = child_.text
productVersion_ = self.gds_validate_string(productVersion_, node, 'productVersion')
self.productVersion = productVersion_
# end class classificationDetails
class fieldDataEntry(GeneratedsSuper):
"""Data structure to hold prevalence information. The data includes a
reference to another object (which is an xpath expression
pointing to an object inside the 'ref' element), together with a
time period (startDate -> endDate), an origin - where the object
came from, and various location tags. This allows rich
information on prevalence to be recorded. By convention, time
periods should be wherever possible standard time periods, e.g.
minute, hour, 24 hours, week, month, quarter, year. This will
facilitate combination of data from multiple sources. To
represent a single entry, make startDate == endDate. Commonality
is calculated from the sightings of malware objects (and so such
calculation is easier to automate). Importance is reserved for
cases when “commonality” is not available or if there is a
need to communicate the importance when commonality is low. We
define the commonality on a scale 0 to 100 (0 means “never
found in the field” and 100 means “found very
frequently”). Scaling commonality to 0..100 range instead of
using actual sample counts is to avoid the effect of the user
base size on the commonality. We derive commonality from the
number of affected computers – not from the number of samples
(for example, a hundred parasitic infections of the same virus
on a single computer are to be counted as one). To calculate the
commonality we use two-stage approach and logarithmic scale: -
If the number of affected users exceeds 0.1% of your user base
(more frequent than 1 in a 1000) set commonality to “100” -
Otherwise, calculate the ratio of infected computers amongst
your user base by dividing the real number of affected computers
‘n’ by the total number ‘N’ - Apply the following
formula to get the commonality –( log2(1+n*1000/N) ) * 100 -
Round to the closest integer Obviously, the calculation above
can only be applied to counting of malware sightings on
desktops. If telemetry is collected from a fraction of such
desktops then an appropriate correction should be used. For all
other cases (e.g. sighting on gateways, in some network security
appliance, on an ISP level, etc.) please exercise your best
judgment and apply provided desktop guideline as an example to
make sure the commonality factor is as comparable as possible.
For a URL object the commonality could reflect, for example, how
widely it was spammed. “Importance” should not be used
together with “commonality” (unless commonality=“0”) to
avoid possible confusion. High “importance”, for example,
can be assigned to samples that are over-hyped by media when
their commonality is still “0”. Use the following guidelines
for “importance” which is also defined on a scale 0..100:
100 – you’d expect your CEO and/or media to call you any
second about this object 80 – you might get a call from your
CEO and/or media 60 – you’d expect your boss to call you any
second 40 – you might get a call from your boss 20 – someone
is very likely to contact you about this object 10 – you might
get contacted about this object 0 – you’d be surprised if
anyone would ever contact you about this object"""
subclass = None
superclass = None
def __init__(self, references=None, startDate=None, endDate=None, firstSeenDate=None, origin=None, commonality=None, volume=None, importance=None, location=None):
self.references = references
self.startDate = startDate
self.endDate = endDate
self.firstSeenDate = firstSeenDate
self.origin = origin
self.commonality = commonality
if volume is None:
self.volume = []
else:
self.volume = volume
self.importance = importance
self.location = location
def factory(*args_, **kwargs_):
if fieldDataEntry.subclass:
return fieldDataEntry.subclass(*args_, **kwargs_)
else:
return fieldDataEntry(*args_, **kwargs_)
factory = staticmethod(factory)
def get_references(self): return self.references
def set_references(self, references): self.references = references
def get_startDate(self): return self.startDate
def set_startDate(self, startDate): self.startDate = startDate
def get_endDate(self): return self.endDate
def set_endDate(self, endDate): self.endDate = endDate
def get_firstSeenDate(self): return self.firstSeenDate
def set_firstSeenDate(self, firstSeenDate): self.firstSeenDate = firstSeenDate
def get_origin(self): return self.origin
def set_origin(self, origin): self.origin = origin
def validate_OriginTypeEnum(self, value):
# Validate type OriginTypeEnum, a restriction on xs:string.
pass
def get_commonality(self): return self.commonality
def set_commonality(self, commonality): self.commonality = commonality
def validate_intBetween0and100(self, value):
# Validate type intBetween0and100, a restriction on xs:integer.
pass
def get_volume(self): return self.volume
def set_volume(self, volume): self.volume = volume
def add_volume(self, value): self.volume.append(value)
def insert_volume(self, index, value): self.volume[index] = value
def get_importance(self): return self.importance
def set_importance(self, importance): self.importance = importance
def get_location(self): return self.location
def set_location(self, location): self.location = location
def export(self, write, level, namespace_='', name_='fieldDataEntry', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='fieldDataEntry')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='fieldDataEntry'):
pass
def exportChildren(self, write, level, namespace_='', name_='fieldDataEntry', fromsubclass_=False):
if self.references is not None:
self.references.export(write, level, namespace_, name_='references', )
if self.startDate is not None:
showIndent(write, level)
write('<%sstartDate>%s</%sstartDate>\n' % (namespace_, quote_xml(self.startDate), namespace_))
if self.endDate is not None:
showIndent(write, level)
write('<%sendDate>%s</%sendDate>\n' % (namespace_, quote_xml(self.endDate), namespace_))
if self.firstSeenDate is not None:
showIndent(write, level)
write('<%sfirstSeenDate>%s</%sfirstSeenDate>\n' % (namespace_, quote_xml(self.firstSeenDate), namespace_))
if self.origin is not None:
showIndent(write, level)
write('<%sorigin>%s</%sorigin>\n' % (namespace_, quote_xml(self.origin), namespace_))
if self.commonality is not None:
showIndent(write, level)
write('<%scommonality>%s</%scommonality>\n' % (namespace_, self.gds_format_integer(self.commonality, input_name='commonality'), namespace_))
for volume_ in self.volume:
volume_.export(write, level, namespace_, name_='volume')
if self.importance is not None:
showIndent(write, level)
write('<%simportance>%s</%simportance>\n' % (namespace_, self.gds_format_integer(self.importance, input_name='importance'), namespace_))
if self.location is not None:
self.location.export(write, level, namespace_, name_='location')
def hasContent_(self):
if (
self.references is not None or
self.startDate is not None or
self.endDate is not None or
self.firstSeenDate is not None or
self.origin is not None or
self.commonality is not None or
self.volume or
self.importance is not None or
self.location is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'references':
obj_ = references.factory()
obj_.build(child_)
self.set_references(obj_)
elif nodeName_ == 'startDate':
startDate_ = child_.text
startDate_ = self.gds_validate_string(startDate_, node, 'startDate')
self.startDate = startDate_
elif nodeName_ == 'endDate':
endDate_ = child_.text
endDate_ = self.gds_validate_string(endDate_, node, 'endDate')
self.endDate = endDate_
elif nodeName_ == 'firstSeenDate':
firstSeenDate_ = child_.text
firstSeenDate_ = self.gds_validate_string(firstSeenDate_, node, 'firstSeenDate')
self.firstSeenDate = firstSeenDate_
elif nodeName_ == 'origin':
origin_ = child_.text
origin_ = self.gds_validate_string(origin_, node, 'origin')
self.origin = origin_
self.validate_OriginTypeEnum(self.origin) # validate type OriginTypeEnum
elif nodeName_ == 'commonality':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'commonality')
self.commonality = ival_
self.validate_intBetween0and100(self.commonality) # validate type intBetween0and100
elif nodeName_ == 'volume':
obj_ = volume.factory()
obj_.build(child_)
self.volume.append(obj_)
elif nodeName_ == 'importance':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'importance')
self.importance = ival_
self.validate_intBetween0and100(self.importance) # validate type intBetween0and100
elif nodeName_ == 'location':
obj_ = location.factory()
obj_.build(child_)
self.set_location(obj_)
# end class fieldDataEntry
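# Illustrative sketch (not part of the generated bindings) of the commonality
# guideline in the fieldDataEntry docstring. The argument names 'n' (affected
# computers) and 'N' (total user base) are assumptions of this sketch, not
# schema fields; the helper only mirrors the documented desktop guideline.
def _example_commonality(n, N):
    import math
    if N <= 0 or n < 0:
        raise ValueError('n must be >= 0 and N must be > 0')
    if n * 1000 > N:
        # More frequent than 1 in 1000 of the user base: cap at 100.
        return 100
    return int(round(math.log(1 + n * 1000.0 / N, 2) * 100))
# For example, 50 affected computers in a user base of 1,000,000 gives
# log2(1 + 0.05) * 100 ~= 7, so commonality would be reported as 7, e.g.:
#     entry = fieldDataEntry.factory(commonality=_example_commonality(50, 1000000))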
class references(GeneratedsSuper):
"""The objects the prevalence information pertains to."""
subclass = None
superclass = None
def __init__(self, ref=None):
if ref is None:
self.ref = []
else:
self.ref = ref
def factory(*args_, **kwargs_):
if references.subclass:
return references.subclass(*args_, **kwargs_)
else:
return references(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def export(self, write, level, namespace_='', name_='references', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='references')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='references'):
pass
def exportChildren(self, write, level, namespace_='', name_='references', fromsubclass_=False):
for ref_ in self.ref:
ref_.export(write, level, namespace_, name_='ref')
def hasContent_(self):
if (
self.ref
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ref':
obj_ = reference.factory()
obj_.build(child_)
self.ref.append(obj_)
# end class references
class volume(GeneratedsSuper):
"""Quantitive measurements of prevalence."""
subclass = None
superclass = None
def __init__(self, units=None, valueOf_=None):
self.units = _cast(None, units)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if volume.subclass:
return volume.subclass(*args_, **kwargs_)
else:
return volume(*args_, **kwargs_)
factory = staticmethod(factory)
def get_units(self): return self.units
def set_units(self, units): self.units = units
def validate_VolumeUnitsEnum(self, value):
# Validate type VolumeUnitsEnum, a restriction on xs:string.
pass
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='volume', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='volume')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='volume'):
if self.units is not None and 'units' not in already_processed:
already_processed.append('units')
write(' units=%s' % (quote_attrib(self.units), ))
def exportChildren(self, write, level, namespace_='', name_='volume', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('units', node)
if value is not None and 'units' not in already_processed:
already_processed.append('units')
self.units = value
self.validate_VolumeUnitsEnum(self.units) # validate type VolumeUnitsEnum
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class volume
class location(GeneratedsSuper):
"""Geolocation information for prevalence."""
subclass = None
superclass = None
def __init__(self, type_=None, valueOf_=None):
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if location.subclass:
return location.subclass(*args_, **kwargs_)
else:
return location(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_LocationTypeEnum(self, value):
# Validate type LocationTypeEnum, a restriction on xs:string.
pass
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='location', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='location')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='location'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, write, level, namespace_='', name_='location', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
self.validate_LocationTypeEnum(self.type_) # validate type LocationTypeEnum
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class location
class reference(GeneratedsSuper):
"""Reference element used to hold xpath expressions to objects, for
example file[@id="12345"]."""
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if reference.subclass:
return reference.subclass(*args_, **kwargs_)
else:
return reference(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='reference', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='reference')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='reference'):
pass
def exportChildren(self, write, level, namespace_='', name_='reference', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class reference
class property(GeneratedsSuper):
"""A property."""
subclass = None
superclass = None
def __init__(self, type_=None, valueOf_=None):
self.type_ = _cast(None, type_)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if property.subclass:
return property.subclass(*args_, **kwargs_)
else:
return property(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_PropertyTypeEnum(self, value):
# Validate type PropertyTypeEnum, a restriction on xs:string.
pass
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='property', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='property')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='property'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_), ))
def exportChildren(self, write, level, namespace_='', name_='property', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
self.validate_PropertyTypeEnum(self.type_) # validate type PropertyTypeEnum
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class property
class objectProperty(GeneratedsSuper):
"""Property; a reference to the object, a timestamp and an unbounded
set of properties. This is used to describe extra information
about an object. For example, to show the url parameter strings
associated with a particular URI object. Or to show file names
associated with a particular file. Properties can also be
applied to relationships, by referencing the relationship by id.
    This allows uses such as recording the post data sent in an
http request between a malware (file object) and a uri (uri
object)."""
subclass = None
superclass = None
def __init__(self, id=None, references=None, timestamp=None, property=None):
self.id = _cast(None, id)
self.references = references
self.timestamp = timestamp
if property is None:
self.property = []
else:
self.property = property
def factory(*args_, **kwargs_):
if objectProperty.subclass:
return objectProperty.subclass(*args_, **kwargs_)
else:
return objectProperty(*args_, **kwargs_)
factory = staticmethod(factory)
def get_references(self): return self.references
def set_references(self, references): self.references = references
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def get_property(self): return self.property
def set_property(self, property): self.property = property
def add_property(self, value): self.property.append(value)
def insert_property(self, index, value): self.property[index] = value
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='objectProperty', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='objectProperty')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='objectProperty'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='', name_='objectProperty', fromsubclass_=False):
if self.references is not None:
self.references.export(write, level, namespace_, name_='references', )
if self.timestamp is not None:
showIndent(write, level)
write('<%stimestamp>%s</%stimestamp>\n' % (namespace_, quote_xml(self.timestamp), namespace_))
for property_ in self.property:
property_.export(write, level, namespace_, name_='property')
def hasContent_(self):
if (
self.references is not None or
self.timestamp is not None or
self.property
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'references':
obj_ = references.factory()
obj_.build(child_)
self.set_references(obj_)
elif nodeName_ == 'timestamp':
timestamp_ = child_.text
timestamp_ = self.gds_validate_string(timestamp_, node, 'timestamp')
self.timestamp = timestamp_
elif nodeName_ == 'property':
obj_ = property.factory()
obj_.build(child_)
self.property.append(obj_)
# end class objectProperty
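# --- Illustrative sketch (not part of the generated bindings) ---------------
# The objectProperty docstring above explains that extra information is
# attached to an object (or relationship) as a set of property children plus
# an optional timestamp. The helper below shows, under that reading, how such
# an element could be assembled with the setter methods generated in this
# module. The id, timestamp and property type/value are made-up example
# values, and the references child (which points at the described object) is
# omitted here for brevity.
def _example_build_object_property():
    prop = property.factory()
    prop.set_type('urlParameterString')   # hypothetical PropertyTypeEnum value
    prop.set_valueOf_('?session=42')
    obj_prop = objectProperty.factory()
    obj_prop.set_id('objprop-1')
    obj_prop.set_timestamp('2011-10-04T00:00:00')
    obj_prop.add_property(prop)
    return obj_prop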
class relationship(GeneratedsSuper):
"""Relationships are used to express relationships between objects, and
dates. Relationships have a type (an attribute with a defined
list of allowed relationships), source (a set of xpath
references to the parent end of the relationship), target (xpath
references to the other end of the relationship) and an optional
date. The linking of objects with types is a powerful way of
describing data. The dates can be used to provide context. For
    example, to assign a classification to an object, that can be done
    with an "isClassifiedAs" relationship, with the date meaning
    that that was the date that the classification was assigned. To
    show urls and the last visited date, this can be expressed as a
"verifiedBy" relationship between the urls and the entity doing
the verification, with the date interpreted as the verification
date."""
subclass = None
superclass = None
def __init__(self, type_=None, id=None, source=None, target=None, timestamp=None):
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.source = source
self.target = target
self.timestamp = timestamp
def factory(*args_, **kwargs_):
if relationship.subclass:
return relationship.subclass(*args_, **kwargs_)
else:
return relationship(*args_, **kwargs_)
factory = staticmethod(factory)
def get_source(self): return self.source
def set_source(self, source): self.source = source
def get_target(self): return self.target
def set_target(self, target): self.target = target
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def validate_RelationshipTypeEnum(self, value):
# Validate type RelationshipTypeEnum, a restriction on xs:string.
pass
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='relationship', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='relationship')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='relationship'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='', name_='relationship', fromsubclass_=False):
if self.source is not None:
self.source.export(write, level, namespace_, name_='source', )
if self.target is not None:
self.target.export(write, level, namespace_, name_='target', )
if self.timestamp is not None:
showIndent(write, level)
write('<%stimestamp>%s</%stimestamp>\n' % (namespace_, quote_xml(self.timestamp), namespace_))
def hasContent_(self):
if (
self.source is not None or
self.target is not None or
self.timestamp is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
self.validate_RelationshipTypeEnum(self.type_) # validate type RelationshipTypeEnum
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'source':
obj_ = source.factory()
obj_.build(child_)
self.set_source(obj_)
elif nodeName_ == 'target':
obj_ = target.factory()
obj_.build(child_)
self.set_target(obj_)
elif nodeName_ == 'timestamp':
timestamp_ = child_.text
timestamp_ = self.gds_validate_string(timestamp_, node, 'timestamp')
self.timestamp = timestamp_
# end class relationship
class source(GeneratedsSuper):
"""References to objects at the parent end of the relationship."""
subclass = None
superclass = None
def __init__(self, ref=None):
if ref is None:
self.ref = []
else:
self.ref = ref
def factory(*args_, **kwargs_):
if source.subclass:
return source.subclass(*args_, **kwargs_)
else:
return source(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def export(self, write, level, namespace_='', name_='source', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='source')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='source'):
pass
def exportChildren(self, write, level, namespace_='', name_='source', fromsubclass_=False):
for ref_ in self.ref:
ref_.export(write, level, namespace_, name_='ref')
def hasContent_(self):
if (
self.ref
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ref':
obj_ = reference.factory()
obj_.build(child_)
self.ref.append(obj_)
# end class source
class target(GeneratedsSuper):
"""References to objects at the child end of the relationship."""
subclass = None
superclass = None
def __init__(self, ref=None):
if ref is None:
self.ref = []
else:
self.ref = ref
def factory(*args_, **kwargs_):
if target.subclass:
return target.subclass(*args_, **kwargs_)
else:
return target(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def add_ref(self, value): self.ref.append(value)
def insert_ref(self, index, value): self.ref[index] = value
def export(self, write, level, namespace_='', name_='target', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='target')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='target'):
pass
def exportChildren(self, write, level, namespace_='', name_='target', fromsubclass_=False):
for ref_ in self.ref:
ref_.export(write, level, namespace_, name_='ref')
def hasContent_(self):
if (
self.ref
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ref':
obj_ = reference.factory()
obj_.build(child_)
self.ref.append(obj_)
# end class target
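# --- Illustrative sketch (not part of the generated bindings) ---------------
# As described in the relationship docstring above, a relationship carries a
# type, a source and a target (each holding references to objects at either
# end) and an optional date. The helper below is a hedged example of
# assembling one with the classes defined above; the object ids, the
# relationship type and the timestamp are made-up values for illustration.
def _example_build_relationship():
    src_ref = reference.factory()
    src_ref.set_valueOf_('obj-1')         # parent end of the relationship
    tgt_ref = reference.factory()
    tgt_ref.set_valueOf_('obj-2')         # other end of the relationship
    src = source.factory()
    src.add_ref(src_ref)
    tgt = target.factory()
    tgt.add_ref(tgt_ref)
    rel = relationship.factory()
    rel.set_type('isClassifiedAs')        # assumed RelationshipTypeEnum value
    rel.set_id('rel-1')
    rel.set_source(src)
    rel.set_target(tgt)
    rel.set_timestamp('2011-10-04T00:00:00')
    return rel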
class softwarePackageObject(GeneratedsSuper):
"""Software package object, used to store information about a software
package, such as the vendor and version. Intended primarily for
the clean-file metadata sharing use case."""
subclass = None
superclass = None
def __init__(self, id=None, vendor=None, productgroup=None, product=None, version=None, update=None, edition=None, language=None, CPEname=None):
self.id = _cast(None, id)
self.vendor = vendor
self.productgroup = productgroup
self.product = product
self.version = version
self.update = update
self.edition = edition
self.language = language
self.CPEname = CPEname
def factory(*args_, **kwargs_):
if softwarePackageObject.subclass:
return softwarePackageObject.subclass(*args_, **kwargs_)
else:
return softwarePackageObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_vendor(self): return self.vendor
def set_vendor(self, vendor): self.vendor = vendor
def get_productgroup(self): return self.productgroup
def set_productgroup(self, productgroup): self.productgroup = productgroup
def get_product(self): return self.product
def set_product(self, product): self.product = product
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_update(self): return self.update
def set_update(self, update): self.update = update
def get_edition(self): return self.edition
def set_edition(self, edition): self.edition = edition
def get_language(self): return self.language
def set_language(self, language): self.language = language
def get_CPEname(self): return self.CPEname
def set_CPEname(self, CPEname): self.CPEname = CPEname
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='softwarePackageObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='softwarePackageObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='softwarePackageObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='softwarePackageObject', fromsubclass_=False):
if self.vendor is not None:
showIndent(write, level)
write('<%svendor>%s</%svendor>\n' % (namespace_, quote_xml(self.vendor), namespace_))
if self.productgroup is not None:
showIndent(write, level)
write('<%sproductgroup>%s</%sproductgroup>\n' % (namespace_, quote_xml(self.productgroup), namespace_))
if self.product is not None:
showIndent(write, level)
write('<%sproduct>%s</%sproduct>\n' % (namespace_, quote_xml(self.product), namespace_))
if self.version is not None:
showIndent(write, level)
write('<%sversion>%s</%sversion>\n' % (namespace_, quote_xml(self.version), namespace_))
if self.update is not None:
showIndent(write, level)
write('<%supdate>%s</%supdate>\n' % (namespace_, quote_xml(self.update), namespace_))
if self.edition is not None:
showIndent(write, level)
write('<%sedition>%s</%sedition>\n' % (namespace_, quote_xml(self.edition), namespace_))
if self.language is not None:
showIndent(write, level)
write('<%slanguage>%s</%slanguage>\n' % (namespace_, quote_xml(self.language), namespace_))
if self.CPEname is not None:
self.CPEname.export(write, level, namespace_, name_='CPEname')
def hasContent_(self):
if (
self.vendor is not None or
self.productgroup is not None or
self.product is not None or
self.version is not None or
self.update is not None or
self.edition is not None or
self.language is not None or
self.CPEname is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'vendor':
vendor_ = child_.text
vendor_ = self.gds_validate_string(vendor_, node, 'vendor')
self.vendor = vendor_
elif nodeName_ == 'productgroup':
productgroup_ = child_.text
productgroup_ = self.gds_validate_string(productgroup_, node, 'productgroup')
self.productgroup = productgroup_
elif nodeName_ == 'product':
product_ = child_.text
product_ = self.gds_validate_string(product_, node, 'product')
self.product = product_
elif nodeName_ == 'version':
version_ = child_.text
version_ = self.gds_validate_string(version_, node, 'version')
self.version = version_
elif nodeName_ == 'update':
update_ = child_.text
update_ = self.gds_validate_string(update_, node, 'update')
self.update = update_
elif nodeName_ == 'edition':
edition_ = child_.text
edition_ = self.gds_validate_string(edition_, node, 'edition')
self.edition = edition_
elif nodeName_ == 'language':
language_ = child_.text
language_ = self.gds_validate_string(language_, node, 'language')
self.language = language_
elif nodeName_ == 'CPEname':
obj_ = CPEname.factory()
obj_.build(child_)
self.set_CPEname(obj_)
# end class softwarePackageObject
class CPEname(GeneratedsSuper):
"""The Common Platform Enumeration, or CPE, name of the package if one
exists. CPE is a structured naming scheme for IT systems,
software, and packages. For more information on CPE see
http://cpe.mitre.org. For the official CPE dictionary see
    http://nvd.nist.gov/cpe.cfm. The cpeVersion attribute gives the version
    of CPE that is used for the name in the CPEname element. As of
    10/04/2011 this is 2.2."""
subclass = None
superclass = None
def __init__(self, cpeVersion=None, valueOf_=None):
self.cpeVersion = _cast(None, cpeVersion)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CPEname.subclass:
return CPEname.subclass(*args_, **kwargs_)
else:
return CPEname(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cpeVersion(self): return self.cpeVersion
def set_cpeVersion(self, cpeVersion): self.cpeVersion = cpeVersion
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='CPEname', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='CPEname')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='CPEname'):
if self.cpeVersion is not None and 'cpeVersion' not in already_processed:
already_processed.append('cpeVersion')
write(' cpeVersion=%s' % (quote_attrib(self.cpeVersion)))
def exportChildren(self, write, level, namespace_='', name_='CPEname', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('cpeVersion', node)
if value is not None and 'cpeVersion' not in already_processed:
already_processed.append('cpeVersion')
self.cpeVersion = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class CPEname
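# --- Illustrative sketch (not part of the generated bindings) ---------------
# The softwarePackageObject and CPEname docstrings above describe a package
# record (vendor, product, version, ...) that can carry a CPE 2.2 style name.
# The helper below shows one way such a record could be built with the
# generated setters; the vendor, product, version and CPE name are made-up
# example values.
def _example_build_software_package():
    cpe = CPEname.factory()
    cpe.set_cpeVersion('2.2')
    cpe.set_valueOf_('cpe:/a:example_vendor:example_product:1.0')
    pkg = softwarePackageObject.factory()
    pkg.set_id('swpkg-1')
    pkg.set_vendor('Example Vendor')
    pkg.set_product('Example Product')
    pkg.set_version('1.0')
    pkg.set_CPEname(cpe)
    return pkg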
class digitalSignatureObject(GeneratedsSuper):
"""Digital signature object, used to hold information about digitally
    signed binaries with regard to the certificate used and its
validity."""
subclass = None
superclass = None
def __init__(self, type_=None, id=None, certificateIssuer=None, certificateSubject=None, certificateValidity=None, certificateRevocationTimestamp=None, signingTimestamp=None):
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.certificateIssuer = certificateIssuer
self.certificateSubject = certificateSubject
self.certificateValidity = certificateValidity
self.certificateRevocationTimestamp = certificateRevocationTimestamp
self.signingTimestamp = signingTimestamp
def factory(*args_, **kwargs_):
if digitalSignatureObject.subclass:
return digitalSignatureObject.subclass(*args_, **kwargs_)
else:
return digitalSignatureObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_certificateIssuer(self): return self.certificateIssuer
def set_certificateIssuer(self, certificateIssuer): self.certificateIssuer = certificateIssuer
def get_certificateSubject(self): return self.certificateSubject
def set_certificateSubject(self, certificateSubject): self.certificateSubject = certificateSubject
def get_certificateValidity(self): return self.certificateValidity
def set_certificateValidity(self, certificateValidity): self.certificateValidity = certificateValidity
def get_certificateRevocationTimestamp(self): return self.certificateRevocationTimestamp
def set_certificateRevocationTimestamp(self, certificateRevocationTimestamp): self.certificateRevocationTimestamp = certificateRevocationTimestamp
def get_signingTimestamp(self): return self.signingTimestamp
def set_signingTimestamp(self, signingTimestamp): self.signingTimestamp = signingTimestamp
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='digitalSignatureObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='digitalSignatureObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='digitalSignatureObject'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.append('type_')
write(' type=%s' % (quote_attrib(self.type_)))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='digitalSignatureObject', fromsubclass_=False):
if self.certificateIssuer is not None:
showIndent(write, level)
write('<%scertificateIssuer>%s</%scertificateIssuer>\n' % (namespace_, quote_xml(self.certificateIssuer), namespace_))
if self.certificateSubject is not None:
showIndent(write, level)
write('<%scertificateSubject>%s</%scertificateSubject>\n' % (namespace_, quote_xml(self.certificateSubject), namespace_))
if self.certificateValidity is not None:
showIndent(write, level)
write('<%scertificateValidity>%s</%scertificateValidity>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.certificateValidity)), input_name='certificateValidity'), namespace_))
if self.certificateRevocationTimestamp is not None:
showIndent(write, level)
write('<%scertificateRevocationTimestamp>%s</%scertificateRevocationTimestamp>\n' % (namespace_, quote_xml(self.certificateRevocationTimestamp), namespace_))
if self.signingTimestamp is not None:
self.signingTimestamp.export(write, level, namespace_, name_='signingTimestamp')
def hasContent_(self):
if (
self.certificateIssuer is not None or
self.certificateSubject is not None or
self.certificateValidity is not None or
self.certificateRevocationTimestamp is not None or
self.signingTimestamp is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.append('type')
self.type_ = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'certificateIssuer':
certificateIssuer_ = child_.text
certificateIssuer_ = self.gds_validate_string(certificateIssuer_, node, 'certificateIssuer')
self.certificateIssuer = certificateIssuer_
elif nodeName_ == 'certificateSubject':
certificateSubject_ = child_.text
certificateSubject_ = self.gds_validate_string(certificateSubject_, node, 'certificateSubject')
self.certificateSubject = certificateSubject_
elif nodeName_ == 'certificateValidity':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'certificateValidity')
self.certificateValidity = ival_
elif nodeName_ == 'certificateRevocationTimestamp':
certificateRevocationTimestamp_ = child_.text
certificateRevocationTimestamp_ = self.gds_validate_string(certificateRevocationTimestamp_, node, 'certificateRevocationTimestamp')
self.certificateRevocationTimestamp = certificateRevocationTimestamp_
elif nodeName_ == 'signingTimestamp':
obj_ = signingTimestamp.factory()
obj_.build(child_)
self.set_signingTimestamp(obj_)
# end class digitalSignatureObject
class signingTimestamp(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valid=None, valueOf_=None):
self.valid = _cast(bool, valid)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if signingTimestamp.subclass:
return signingTimestamp.subclass(*args_, **kwargs_)
else:
return signingTimestamp(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valid(self): return self.valid
def set_valid(self, valid): self.valid = valid
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def export(self, write, level, namespace_='', name_='signingTimestamp', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='signingTimestamp')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='signingTimestamp'):
if self.valid is not None and 'valid' not in already_processed:
already_processed.append('valid')
write(' valid="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.valid)), input_name='valid'))
def exportChildren(self, write, level, namespace_='', name_='signingTimestamp', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('valid', node)
if value is not None and 'valid' not in already_processed:
already_processed.append('valid')
if value in ('true', '1'):
self.valid = True
elif value in ('false', '0'):
self.valid = False
else:
raise_parse_error(node, 'Bad boolean attribute')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class signingTimestamp
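# --- Illustrative sketch (not part of the generated bindings) ---------------
# The digitalSignatureObject docstring above describes a record of a signed
# binary's certificate issuer/subject, its validity and a signing timestamp.
# The helper below assembles one with the generated setters; the issuer,
# subject and timestamp values are made up for illustration only.
def _example_build_digital_signature():
    ts = signingTimestamp.factory()
    ts.set_valid(True)
    ts.set_valueOf_('2011-10-04T00:00:00')
    sig = digitalSignatureObject.factory()
    sig.set_id('sig-1')
    sig.set_certificateIssuer('Example CA')
    sig.set_certificateSubject('Example Software Vendor')
    sig.set_certificateValidity(True)
    sig.set_signingTimestamp(ts)
    return sig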
class taggantObject(GeneratedsSuper):
"""Taggant object, for use in characterizing the software taggant that
may be associated with a file or multiple files. For more
information on the taggant system or the IEEE Malware Working
Group that created it, please see
http://standards.ieee.org/develop/indconn/icsg/malware.html."""
subclass = None
superclass = None
def __init__(self, id=None, vendorID=None, taggantValidity=None, signingTimestamp=None):
self.id = _cast(None, id)
self.vendorID = vendorID
self.taggantValidity = taggantValidity
self.signingTimestamp = signingTimestamp
def factory(*args_, **kwargs_):
if taggantObject.subclass:
return taggantObject.subclass(*args_, **kwargs_)
else:
return taggantObject(*args_, **kwargs_)
factory = staticmethod(factory)
def get_vendorID(self): return self.vendorID
def set_vendorID(self, vendorID): self.vendorID = vendorID
def get_taggantValidity(self): return self.taggantValidity
def set_taggantValidity(self, taggantValidity): self.taggantValidity = taggantValidity
def get_signingTimestamp(self): return self.signingTimestamp
def set_signingTimestamp(self, signingTimestamp): self.signingTimestamp = signingTimestamp
def get_id(self): return self.id
def set_id(self, id): self.id = id
def export(self, write, level, namespace_='', name_='taggantObject', namespacedef_=''):
showIndent(write, level)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(write, level, already_processed, namespace_, name_='taggantObject')
if self.hasContent_():
write('>\n')
self.exportChildren(write, level + 1, namespace_, name_)
showIndent(write, level)
write('</%s%s>\n' % (namespace_, name_))
else:
write('/>\n')
def exportAttributes(self, write, level, already_processed, namespace_='', name_='taggantObject'):
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
write(' id=%s' % (quote_attrib(self.id)))
def exportChildren(self, write, level, namespace_='', name_='taggantObject', fromsubclass_=False):
if self.vendorID is not None:
showIndent(write, level)
write('<%svendorID>%s</%svendorID>\n' % (namespace_, quote_xml(self.vendorID), namespace_))
if self.taggantValidity is not None:
showIndent(write, level)
write('<%staggantValidity>%s</%staggantValidity>\n' % (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.taggantValidity)), input_name='taggantValidity'), namespace_))
if self.signingTimestamp is not None:
self.signingTimestamp.export(write, level, namespace_, name_='signingTimestamp')
def hasContent_(self):
if (
self.vendorID is not None or
self.taggantValidity is not None or
self.signingTimestamp is not None
):
return True
else:
return False
def build(self, node):
self.__sourcenode__ = node
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.append('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'vendorID':
vendorID_ = child_.text
vendorID_ = self.gds_validate_string(vendorID_, node, 'vendorID')
self.vendorID = vendorID_
elif nodeName_ == 'taggantValidity':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'taggantValidity')
self.taggantValidity = ival_
elif nodeName_ == 'signingTimestamp':
obj_ = signingTimestamp.factory()
obj_.build(child_)
self.set_signingTimestamp(obj_)
# end class taggantObject
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'malwareMetaData'
rootClass = malwareMetaData
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout.write, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseString(inString):
from mixbox.vendor.six import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'malwareMetaData'
rootClass = malwareMetaData
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout.write, 0, name_="malwareMetaData",
namespacedef_='')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'malwareMetaData'
rootClass = malwareMetaData
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from mmdef import *\n\n')
sys.stdout.write('import mmdef as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"ASNObject",
"CPEname",
"IPAddress",
"IPObject",
"classificationDetails",
"classificationObject",
"digitalSignatureObject",
"domainObject",
"entityObject",
"extraHash",
"fieldData",
"fieldDataEntry",
"fileObject",
"location",
"malwareMetaData",
"objectProperties",
"objectProperty",
"objects",
"property",
"reference",
"references",
"registryObject",
"relationship",
"relationships",
"signingTimestamp",
"softwarePackageObject",
"source",
"taggantObject",
"target",
"uriObject",
"volume"
]
|
{
"content_hash": "f58acacac01a0a8084963dbed2f8c1e5",
"timestamp": "",
"source": "github",
"line_count": 3342,
"max_line_length": 588,
"avg_line_length": 47.39108318372232,
"alnum_prop": 0.6269754579147752,
"repo_name": "MAECProject/python-maec",
"id": "b70d006d7191a53b5d626e74f2130a9ade0df109",
"size": "158622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maec/bindings/mmdef_1_2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "862178"
}
],
"symlink_target": ""
}
|
from block import *
import os
import time
import base64
from logging import ERROR, WARN, INFO, DEBUG
class file_data_reader(Block):
def on_load(self, config):
self.config = config
self.add_port("input", Port.PUSH, Port.UNNAMED, ["url"])
self.add_port("output", Port.PUSH, Port.UNNAMED, ["url", "data"])
def get_data(self, log):
return [base64.b64encode(BlockUtils.fetch_file_at_url(u, self.ip_address))
for u in log["url"]]
def recv_push(self, port, log):
        if "token" in log.log:
self.log(INFO, self.id + " got the finish token for directory " + log.log["token"][0])
else:
log.append_field("data", self.get_data(log.log))
self.buffered_push("output", log)
        if "sleep" in self.config:
time.sleep(self.config["sleep"])
|
{
"content_hash": "0e0ab19df6d745a433357867cb25f8fd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 92,
"avg_line_length": 32.12,
"alnum_prop": 0.6450809464508095,
"repo_name": "mpi-sws-rse/datablox",
"id": "25861e0aba3ded9f19e6d6ae0e662777e412c33b",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blox/file_data_reader__1_0/b_file_data_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "378676"
},
{
"name": "Shell",
"bytes": "7299"
}
],
"symlink_target": ""
}
|
"""
FILE: sample_begin_get_route_directions_batch.py
DESCRIPTION:
    This sample demonstrates how to send batches of route direction queries.
USAGE:
    python sample_begin_get_route_directions_batch.py
Set the environment variables with your own values before running the sample:
- AZURE_SUBSCRIPTION_KEY - your subscription key
"""
import os
subscription_key = os.getenv("AZURE_SUBSCRIPTION_KEY")
def begin_get_route_directions_batch():
from azure.core.credentials import AzureKeyCredential
from azure.maps.route import MapsRouteClient
maps_route_client = MapsRouteClient(credential=AzureKeyCredential(subscription_key))
result = maps_route_client.begin_get_route_directions_batch(
queries=[
"47.620659,-122.348934:47.610101,-122.342015&travelMode=bicycle&routeType=eco&traffic=false"
]
)
print("Get route directions batch batch_id to fetch the result later")
print(result.batch_id)
if __name__ == '__main__':
begin_get_route_directions_batch()
|
{
"content_hash": "9724438181787be15cd1c53e134411c5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 104,
"avg_line_length": 32.774193548387096,
"alnum_prop": 0.7312992125984252,
"repo_name": "Azure/azure-sdk-for-python",
"id": "08e21b6f79c8cd38ce144ef72a4319aa76ea918e",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/maps/azure-maps-route/samples/sample_begin_get_route_directions_batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _, ungettext
from .models import Comment
from .views.moderation import perform_flag, perform_approve, perform_delete
from tango_admin.admin_actions import nuke_users
class UsernameSearch(object):
"""The User object may not be auth.User, so we need to provide
a mechanism for issuing the equivalent of a .filter(user__username=...)
search in CommentAdmin.
"""
def __str__(self):
return 'user__%s' % get_user_model().USERNAME_FIELD
class CommentsAdmin(admin.ModelAdmin):
readonly_fields = ("post_date",)
fieldsets = (
(None, {'fields': ('content_type', 'object_pk', 'site')}),
(_('Content'), {'fields': ('user', 'text')}),
(_('Metadata'), {'fields': ('post_date', 'ip_address', 'is_public', 'is_removed')}),
)
list_display = (
'user',
'content_type',
'object_pk',
'ip_address',
'post_date',
'is_public',
'is_removed'
)
list_filter = ('post_date', 'site', 'is_public', 'is_removed')
date_hierarchy = 'post_date'
ordering = ('-post_date',)
raw_id_fields = ('user',)
search_fields = ('text', UsernameSearch(), 'user__name', 'user__email', 'ip_address')
actions = ["flag_comments", "approve_comments", "remove_comments", nuke_users]
def get_actions(self, request):
actions = super(CommentsAdmin, self).get_actions(request)
# Only superusers should be able to delete the comments from the DB.
if not request.user.is_superuser and 'delete_selected' in actions:
actions.pop('delete_selected')
if not request.user.has_perm('comments.can_moderate'):
if 'approve_comments' in actions:
actions.pop('approve_comments')
if 'remove_comments' in actions:
actions.pop('remove_comments')
return actions
def flag_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_flag,
lambda n: ungettext('flagged', 'flagged', n))
flag_comments.short_description = _("Flag selected comments")
def approve_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_approve,
lambda n: ungettext('approved', 'approved', n))
approve_comments.short_description = _("Approve selected comments")
def remove_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_delete,
lambda n: ungettext('removed', 'removed', n))
remove_comments.short_description = _("Remove selected comments")
def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext('1 comment was successfully %(action)s.',
'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
admin.site.register(Comment, CommentsAdmin)
|
{
"content_hash": "ad1b36178540eabf7c7f3cb5f5442bb4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 99,
"avg_line_length": 39.310344827586206,
"alnum_prop": 0.6163742690058479,
"repo_name": "tBaxter/tango-comments",
"id": "f3af3243095ff6382073b26ad17dad6571487636",
"size": "3420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/tango_comments/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17438"
},
{
"name": "Python",
"bytes": "200604"
}
],
"symlink_target": ""
}
|
import unittest
import pyqubes.validate
import pyqubes.constants
class TestvalidateLinuxUsername(unittest.TestCase):
def test_validate_linux_username_valid_simple(self):
input_string = "foobar"
valid_input_string = pyqubes.validate.linux_username(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_linux_username_valid_complex(self):
input_string = "_f00-bar$"
valid_input_string = pyqubes.validate.linux_username(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_linux_username_invalid_spaces(self):
input_string = "foo bar"
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_username(input_string)
def test_validate_linux_username_invalid_numeric(self):
input_string = "1234foobar"
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_username(input_string)
def test_validate_linux_username_invalid_length_zero(self):
input_string = ""
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_username(input_string)
def test_validate_linux_username_invalid_length_long(self):
input_string = "fb" * 32
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_username(input_string)
class TestvalidateLinuxHostname(unittest.TestCase):
def test_validate_linux_hostname_valid_simple(self):
input_string = "foobar"
valid_input_string = pyqubes.validate.linux_hostname(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_linux_hostname_valid_constant(self):
input_string = pyqubes.constants.FEDORA_23
valid_input_string = pyqubes.validate.linux_hostname(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_linux_hostname_valid_complex(self):
input_string = "edge.had-oop.1234-"
valid_input_string = pyqubes.validate.linux_hostname(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_linux_hostname_invalid_spaces(self):
input_string = "foo bar"
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_hostname(input_string)
def test_validate_linux_hostname_invalid_period(self):
input_string = "."
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_hostname(input_string)
def test_validate_linux_hostname_invalid_trailing(self):
input_string = "trailing."
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_hostname(input_string)
def test_validate_linux_hostname_invalid_length_zero(self):
input_string = ""
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_hostname(input_string)
def test_validate_linux_hostname_invalid_length_long(self):
input_string = "fb" * 256
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.linux_hostname(input_string)
class TestvalidateFirewallPolicy(unittest.TestCase):
def test_validate_firewall_policy_valid(self):
input_string = "allow"
valid_input_string = pyqubes.validate.firewall_policy(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_firewall_policy_valid_constant(self):
input_string = pyqubes.constants.DENY
valid_input_string = pyqubes.validate.firewall_policy(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_firewall_policy_invalid_string(self):
input_string = "fooxbar"
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.firewall_policy(input_string)
def test_validate_firewall_policy_invalid_length_zero(self):
input_string = ""
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.firewall_policy(input_string)
class TestvalidateLabelColor(unittest.TestCase):
def test_validate_label_color_valid(self):
input_string = "orange"
valid_input_string = pyqubes.validate.label_color(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_label_color_valid_constant(self):
input_string = pyqubes.constants.GRAY
valid_input_string = pyqubes.validate.label_color(input_string)
self.assertEqual(valid_input_string, input_string)
def test_validate_label_color_invalid_string(self):
input_string = "fooxbar"
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.label_color(input_string)
def test_validate_label_color_invalid_length_zero(self):
input_string = ""
with self.assertRaises(ValueError):
valid_input_string = pyqubes.validate.label_color(input_string)
|
{
"content_hash": "75868c3b059560172420312b3d3e7c60",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 42.85,
"alnum_prop": 0.6997277323998444,
"repo_name": "tommilligan/pyqubes",
"id": "d03407c791469abc90c286e86bdf016f767fb993",
"size": "5165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47332"
}
],
"symlink_target": ""
}
|
import sys, re
from engine.database import Database
db = Database("numbers.sqlite")
with db as conn:
for row in conn.execute("SELECT * from log ORDER BY time"):
print(row)
|
{
"content_hash": "4e588a33a1376001bf89fb712e9d95a4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.6972972972972973,
"repo_name": "benjaveri/phonescreen",
"id": "ee2c06826e132780c7900038b48871fe73bf78f6",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "22242"
}
],
"symlink_target": ""
}
|
import operator
from collections import OrderedDict, defaultdict
class LRUDict(OrderedDict):
# https://gist.github.com/davesteele/44793cd0348f59f8fadd49d7799bd306
def __init__(self, *args, maximum_length: int, **kwargs):
assert maximum_length > 0
self.maximum_length = maximum_length
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
super().__setitem__(key, value)
super().move_to_end(key)
while len(self) > self.maximum_length:
super().__delitem__(next(iter(self)))
def __getitem__(self, key):
value = super().__getitem__(key)
super().move_to_end(key)
return value
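# A short usage sketch for LRUDict (illustrative only; not used elsewhere in
# this module): the mapping keeps at most `maximum_length` entries and evicts
# the least recently used key, where both reads and writes count as a use.
def _demo_lru_dict():
    cache = LRUDict(maximum_length=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']          # touching 'a' makes 'b' the least recently used key
    cache['c'] = 3      # inserting 'c' evicts 'b'
    assert list(cache) == ['a', 'c']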
def group_by(items, key):
items_by_key = defaultdict(list)
for item in items:
items_by_key[item[key]].append(item)
return dict(items_by_key)
def find_item(
items, key, value, get_value=lambda item, key: getattr(item, key),
normalize=lambda _: _, compare=operator.eq):
normalized_value = normalize(value)
def is_match(item):
try:
v = get_value(item, key)
except KeyError:
is_match = False
else:
normalized_v = normalize(v)
is_match = compare(normalized_value, normalized_v)
return is_match
return next(filter(is_match, items))
def extend_uniquely(old_items, new_items):
old_items.extend(_ for _ in new_items if _ not in old_items)
|
{
"content_hash": "b9638f53b09d3cddcfb8cfca463e92d6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 74,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.6071184120465435,
"repo_name": "crosscompute/crosscompute",
"id": "75455edf9e67353edbc3774bcec3795142fe26fe",
"size": "1461",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "crosscompute/macros/iterable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "227"
},
{
"name": "HTML",
"bytes": "1017"
},
{
"name": "JavaScript",
"bytes": "5264"
},
{
"name": "Jinja",
"bytes": "4487"
},
{
"name": "Python",
"bytes": "270455"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
"""
core-daemon: the CORE daemon is a server process that receives CORE API
messages and instantiates emulated nodes and networks within the kernel. Various
message handlers are defined and some support for sending messages.
"""
import argparse
import logging
import os
import time
from configparser import ConfigParser
from pathlib import Path
from core import constants
from core.api.grpc.server import CoreGrpcServer
from core.constants import CORE_CONF_DIR, COREDPY_VERSION
from core.emulator.coreemu import CoreEmu
from core.utils import load_logging_config
logger = logging.getLogger(__name__)
def banner():
"""
Output the program banner printed to the terminal or log file.
:return: nothing
"""
logger.info("CORE daemon v.%s started %s", constants.COREDPY_VERSION, time.ctime())
def cored(cfg):
"""
Start the CoreServer object and enter the server loop.
:param dict cfg: core configuration
:return: nothing
"""
# initialize grpc api
coreemu = CoreEmu(cfg)
grpc_server = CoreGrpcServer(coreemu)
address_config = cfg["grpcaddress"]
port_config = cfg["grpcport"]
grpc_address = f"{address_config}:{port_config}"
grpc_server.listen(grpc_address)
def get_merged_config(filename):
"""
Return a configuration after merging config file and command-line arguments.
:param str filename: file name to merge configuration settings with
:return: merged configuration
:rtype: dict
"""
# these are the defaults used in the config file
default_log = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
default_grpc_port = "50051"
default_address = "localhost"
defaults = {
"grpcport": default_grpc_port,
"grpcaddress": default_address,
"logfile": default_log,
}
parser = argparse.ArgumentParser(
description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes."
)
parser.add_argument(
"-f",
"--configfile",
dest="configfile",
help=f"read config from specified file; default = {filename}",
)
parser.add_argument(
"--ovs",
action="store_true",
help="enable experimental ovs mode, default is false",
)
parser.add_argument(
"--grpc-port",
dest="grpcport",
help=f"grpc port to listen on; default {default_grpc_port}",
)
parser.add_argument(
"--grpc-address",
dest="grpcaddress",
help=f"grpc address to listen on; default {default_address}",
)
parser.add_argument(
"-l", "--logfile", help=f"core logging configuration; default {default_log}"
)
# parse command line options
args = parser.parse_args()
# convert ovs to internal format
args.ovs = "1" if args.ovs else "0"
# read the config file
if args.configfile is not None:
filename = args.configfile
del args.configfile
cfg = ConfigParser(defaults)
cfg.read(filename)
section = "core-daemon"
if not cfg.has_section(section):
cfg.add_section(section)
# merge argparse with configparser
for opt in vars(args):
val = getattr(args, opt)
if val is not None:
cfg.set(section, opt, str(val))
return dict(cfg.items(section))
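# For illustration only (hypothetical values): with the defaults above, a
# core.conf containing
#     [core-daemon]
#     grpcport = 50052
# and the command line "core-daemon --grpc-address 0.0.0.0" would merge to
# {'grpcport': '50052', 'grpcaddress': '0.0.0.0', 'logfile': <default_log>,
#  'ovs': '0'}; command-line options override the config file, which in turn
# overrides the built-in defaults.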
def main():
"""
Main program startup.
:return: nothing
"""
cfg = get_merged_config(f"{CORE_CONF_DIR}/core.conf")
log_config_path = Path(cfg["logfile"])
load_logging_config(log_config_path)
banner()
try:
cored(cfg)
except KeyboardInterrupt:
logger.info("keyboard interrupt, stopping core daemon")
if __name__ == "__main__":
main()
|
{
"content_hash": "fbe2eee2a7c6e59b938c60bcb4b70820",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 98,
"avg_line_length": 28.423076923076923,
"alnum_prop": 0.6568335588633288,
"repo_name": "coreemu/core",
"id": "6b9caa54e90e6ab3e87d4120560e56dbe8bf23ca",
"size": "3695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/core/scripts/daemon.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "92828"
},
{
"name": "HTML",
"bytes": "329"
},
{
"name": "M4",
"bytes": "7568"
},
{
"name": "Makefile",
"bytes": "11209"
},
{
"name": "Python",
"bytes": "1530671"
},
{
"name": "Shell",
"bytes": "25741"
}
],
"symlink_target": ""
}
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal)
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.fixes import unique
from sklearn.utils.testing import assert_greater, assert_less
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
"""
Test parameters on classes that make use of libsvm.
"""
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
"""
Check consistency on dataset iris.
"""
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
def test_single_sample_1d():
"""
Test whether SVCs work on a single sample given as a 1-d array
"""
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
"""
SVC with a precomputed kernel.
We test it with a toy dataset and with iris.
"""
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[0.25, -.25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
"""
Test Support Vector Regression
"""
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.)):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
"""
Test OneClassSVM
"""
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
"""
Test OneClassSVM decision function
"""
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
"""
Make sure some tweaking of parameters works.
We change clf.dual_coef_ at run time and expect .predict() to change
accordingly. Notice that this is not trivial since it involves a lot
of C/Python copying in the libsvm bindings.
The success of this test ensures that the mapping between libsvm and
the python classifier is complete.
"""
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[.25, -.25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf.dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
"""
Predict probabilities using SVC
This uses cross validation, so we use a slightly bigger testing set.
"""
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
"""
Test decision_function
Sanity check, test that decision_function implemented in python
returns the same as the one in libsvm
"""
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec, clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([[-1.], [-0.66], [-1.], [0.66], [1.], [1.]])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_weight():
"""
Test class weights
"""
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
"""
Test weights on individual samples
"""
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
"""Test class weights for imbalanced data"""
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes, y_ind = unique(y[unbalanced], return_inverse=True)
class_weights = compute_class_weight('auto', classes, y_ind)
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that score is better when class_weight='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred)
<= metrics.f1_score(y, y_pred_balanced))
def test_bad_input():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_linearsvc_parameters():
"""
Test possible parameter combinations in LinearSVC
"""
# generate list of possible parameter combinations
params = [(dual, loss, penalty) for dual in [True, False]
for loss in ['l1', 'l2', 'lr'] for penalty in ['l1', 'l2']]
for dual, loss, penalty in params:
if loss == 'l1' and penalty == 'l1':
assert_raises(ValueError, svm.LinearSVC, penalty=penalty,
loss=loss, dual=dual)
elif loss == 'l1' and penalty == 'l2' and not dual:
assert_raises(ValueError, svm.LinearSVC, penalty=penalty,
loss=loss, dual=dual)
elif penalty == 'l1' and dual:
assert_raises(ValueError, svm.LinearSVC, penalty=penalty,
loss=loss, dual=dual)
else:
svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
def test_linearsvc():
"""
Test basic routines using LinearSVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='l1', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
"""Test LinearSVC with crammer_singer multi-class svm"""
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
"""Test Crammer-Singer formulation in the binary case"""
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_almost_equal(acc, 0.68)
def test_linearsvc_iris():
"""
Test that LinearSVC gives plausible predictions on the iris dataset
Also, test symbolic class names (classes_).
"""
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
"""
Test that dense liblinear honours intercept_scaling param
"""
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='l2',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
"""Check that primal coef modification are not silently ignored"""
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
with warnings.catch_warnings(record=True) as foo:
# Hackish way to reset the warning counter
from sklearn.svm import base
base.__warningregistry__ = {}
warnings.simplefilter("always")
a.fit(X, Y)
assert_equal(len(foo), 1, msg=foo)
assert_equal(foo[0].category, ConvergenceWarning, msg=foo[0].category)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
{
"content_hash": "20c8f2ba64ab8b4f47b2bf2f5fc6a726",
"timestamp": "",
"source": "github",
"line_count": 676,
"max_line_length": 78,
"avg_line_length": 33.71597633136095,
"alnum_prop": 0.6202614952614952,
"repo_name": "B3AU/waveTree",
"id": "8b0ad94d0af93d9825caec313d2f44a30efbc1e4",
"size": "22792",
"binary": false,
"copies": "4",
"ref": "refs/heads/waveTree",
"path": "sklearn/svm/tests/test_svm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "16688671"
},
{
"name": "C++",
"bytes": "257608"
},
{
"name": "CSS",
"bytes": "169185"
},
{
"name": "JavaScript",
"bytes": "79924"
},
{
"name": "Python",
"bytes": "4675499"
},
{
"name": "Shell",
"bytes": "3727"
}
],
"symlink_target": ""
}
|
'''
nje_limbcorr_v2
Author: Nicholas J. Elmer
Department of Atmospheric Science, University of Alabama in Huntsville (UAH)
NASA Short-term Prediction Research and Transition (SPoRT) Center
Huntsville, Alabama, USA
nicholas.j.elmer@nasa.gov
Version: 2.0 (Last updated June 2016)
Note: The function call is different than in Version 1.x
Revision history:
Mar 2015 Version 1.0 released
Jan 2016 Enabled limb correction of projected data (v1.1)
May 2016 Himawari AHI correction coefficients added
Created tables for additional infrared bands,
not just the common RGB bands
Modified function arguments to include satellite name to
support more instruments
Jun 2016 Version 2.0 released
Description:
Limb correct polar and geostationary infrared imagery
Input:
Required:
data: 2D or 3D array, LxPxB array of brightness temperatures
L = number of scans,
P = number of pixels per scan,
B = number of bands
The data can be in any projection.
lats: array (2D), latitude array of full swath/image
vza: array (2D), array of viewing zenith angles for full swath/image (radians)
satellite: string, name of satellite
Valid options for each sensor:
SENSOR SATELLITE
========= =========================
abi goes-r
ahi himawari
avhrr noaa-15, noaa-18, noaa-19,
metop-a, metop-b
modis aqua, terra
seviri meteosat
viirs* snpp
The satellite name is only used for intercalibration.
*Moderate Resolution bands only (M12-M16)
sensor: string, name of sensor/instrument
Valid options: abi, ahi, avhrr, modis, seviri, viirs
jday: integer/float, Julian day / day of year
bandorder: int array, list of bands in same order as 3D data array (Ex. [12,14,15]).
If an unsupported band is listed, that band is not
corrected and is returned unchanged.
Optional:
fillval: float, Fill value for masking bad values (e.g., points beyond full disk,
negative brightness temperatures, etc.). Default is nan.
highres: boolean, Performs a high-spatial-resolution limb correction,
i.e., the full latitude array is used. By default,
only the latitude of the center pixel of each scan
is used to speed up processing, thereby assuming that
the latitude of an entire scan is constant.
Note that the highres option takes approximately
100-200 times longer than the default.
ctp: array (2D), Cloud top pressure in hPa, which must be the same resolution
as the data array. If cloud top pressure is not provided,
all pixels are assumed to be cloudfree, i.e., the full
optical pathlength is corrected for limb effects.
tblpath: string, path containing the limb correction tables.
This path will match the path containing the README file
when the release is first downloaded.
Default is './nje_limbcorr_v2.0/tables/'.
refsen: string, reference sensor ('ahi', 'abi', 'seviri')
Used for intercalibration to reference sensor
using methodology in Elmer et al. (2016; see README
for full citation). By default, the brightness
temperatures are intercalibrated to SEVIRI.
To prevent intercalibration, use the null string ''.
Returns:
An array of limb corrected brightness temperatures of the same size as the input data array.
Required Python modules:
itertools
numpy
time
os
Internal Functions:
calc_coeff
calc_cld
Notes on intercalibration:
- Intercalibration values are only available for IR bands listed in Elmer et al. (2016).
For all other channels, no intercalibration is performed.
- GOES-R ABI is not yet operational, so intercalibration values w.r.t. other sensors
cannot be calculated empirically. Since ABI is very similar to AHI,
AHI intercalibration values are used when ABI is specified.
- Intercalibration values may change over time as sensors age or instrument calibration
is updated.
Example::
from nje_limbcorr_v2 import limbcorr
import numpy as np
band27 = np.array([ [270., 270., 270.],[270., 270., 270.],[270., 270., 270.] ])
band28 = np.array([ [280., 280., 280.],[280., 280., 280.],[280., 280., 280.] ])
band31 = np.array([ [290., 290., 290.],[290., 290., 290.],[290., 290., 290.] ])
band32 = np.array([ [300., 300., 300.],[300., 300., 300.],[300., 300., 300.] ])
data = np.dstack(( band28, band27, band32, band31))
latitudes = np.array([ [35., 35., 35.],[34., 34., 34.],[33., 33., 33.] ])
vza = np.array([ [80., 0., 80.],[80., 0., 80.],[80., 0., 80.] ])
cldtp = np.array([ [100., 600., 1000.],[600., 650., 700.],[300., 400., 500.] ])
# Without cloud correction
no_cldcorr = limbcorr(data, latitudes, vza*(np.pi/180.), 'meteosat', 'seviri', 320, [5, 6, 32, 31])
# With cloud correction
yes_cldcorr = limbcorr(data, latitudes, vza, 'aqua', 'modis', 320, [28, 27, 32, 31], \
refsen='ahi', highres=True, ctp=cldtp, \
tblpath='~/mypath/nje_limbcorr_v2.0/tables/', fillval=0.0)
'''
import numpy as np
import time
import itertools
import os
# ==== INTERNAL FUNCTIONS ========================================
# Calculate limb correction coefficients
def calc_coeff(ma, mb, jday, lats, order=9):
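    # Assumption (not stated in the file): the coefficient tables `ma`/`mb`
    # hold (order+1)**2 = 100 terms.  A and B are then evaluated as 9th-order
    # bivariate polynomials in the pre-scaled Julian day and latitude:
    #   coeff = sum_k m[k] * jday**i * lats**j  over all (i, j) pairs.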
coeffA = np.zeros_like(lats)
coeffB = np.zeros_like(lats)
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
value = jday**i * lats**j
coeffA += ma[k] * value
coeffB += mb[k] * value
return (coeffA, coeffB)
# Calculate cloud correction coefficients
def calc_cld(mc, jday, lats, order=9):
jday = jday/100.
lats = lats/100.
cld = np.zeros_like(lats)
ij = itertools.product(range(order+1), range(order+1))
for k, (i,j) in enumerate(ij):
value = jday**i * lats**j
cld += mc[k] * value
return cld/100.
# ==============================================================
# Main Limb Correction Function
def limbcorr(data, lats, vza, satellite, sensor, jday, bandorder, \
refsen='seviri', highres=False, ctp=[], \
tblpath='./nje_limbcorr_v2.0/tables/', fillval=np.nan):
# Print input information
print '\nSat. & Sensor : %s %s' %(satellite, sensor)
print 'Day of year : %03i' %(jday)
print 'Bands : %s' %(bandorder)
print 'Ref. Sensor : %s' %(refsen)
print 'Data Shape :', (np.shape(data))
print 'Lat/Lon Shape :', (np.shape(lats))
print 'VZA Shape :', (np.shape( vza))
# Set up cloud correction, if needed
if ctp == []: cldcorr = False
else:
cldcorr = True
pressures = np.array([0.10, 0.29, 0.51, 0.80, 1.15, \
1.58, 2.08, 2.67, 3.36, 4.19, 5.20, \
6.44, 7.98, 9.89, 12.26, 15.19, 18.82, \
23.31, 28.88, 35.78, 44.33, 54.62, 66.62, \
80.40, 95.98, 113.42, 132.76, 153.99, 177.12, \
202.09, 228.84, 257.36, 287.64, 319.63, 353.23, \
388.27, 424.57, 461.90, 500.00, 538.59, 577.38, \
616.04, 654.27, 691.75, 728.16, 763.21, 796.59, \
828.05, 857.34, 884.27, 908.65, 930.37, 949.35, \
965.57, 979.06, 989.94, 998.39, 1004.64, 1009.06])
diff = (pressures[1:] - pressures[0:-1])/2.
upper = np.append(pressures[0:-1] + diff, [1013.0])
lower = np.append([0.0], pressures[1:] - diff)
# Normalize cld top pressures to pressures defined above
ctp = ctp * (np.nanmax(ctp)/pressures[-1])
# Check that tblpath is valid
if not os.path.exists(tblpath): raise ValueError('Path to tables does not exist!')
# Check that satellite name is valid
valid = ['goes-r', 'himawari', 'noaa-15', 'noaa-18', 'noaa-19', \
'metop-a', 'metop-b', 'aqua', 'terra', 'meteosat', 'snpp']
check = satellite in valid
if not check: raise ValueError('Invalid satellite name')
# Check that satellite/sensor combination is valid and define parameters
earthrad = 6371. #km; radius of earth
if sensor == 'abi' :
satalt = 35800. #km
if satellite != 'goes-r':
raise ValueError('Invalid satellite/sensor combination')
elif sensor == 'ahi' :
satalt = 35800. #km
if satellite != 'himawari':
raise ValueError('Invalid satellite/sensor combination')
elif sensor == 'avhrr' :
satalt = 833. #km
if satellite != 'noaa-15' and satellite != 'noaa-18' and \
satellite != 'noaa-19' and \
satellite != 'metop-a' and satellite != 'metop-b':
raise ValueError('Invalid satellite/sensor combination')
elif sensor == 'modis' :
satalt = 705. #km; altitude
if ((satellite != 'aqua') and (satellite != 'terra')):
raise ValueError('Invalid satellite/sensor combination')
elif sensor == 'seviri':
satalt = 35800. #km
if satellite != 'meteosat':
raise ValueError('Invalid satellite/sensor combination')
elif sensor == 'viirs':
satalt = 824. #km
if satellite != 'snpp':
raise ValueError('Invalid satellite/sensor combination')
else: raise ValueError('Invalid sensor name')
# Check that data is 2D or 3D. Expand dimensions to 3D if input is 2D.
sz = np.shape(data)
ndims = len(sz)
if ndims != 2 and ndims != 3:
raise Exception('Data array must be 2D (for single band) or 3D (for multiple bands)')
if ndims == 2: data = np.expand_dims(data, 2)
sz = np.shape(data)
lines = sz[0]
pixels = sz[1]
nbands = len(bandorder)
if len(bandorder) != sz[2]:
raise Exception('Bandorder does not match the number of bands in data array')
# Make sure latitude array is 2D and same size as imagery
szl = np.shape(lats)
lndims = len(szl)
if szl[0] != sz[0] or szl[1] != sz[1] or lndims != 2:
raise IndexError('Latitude array must be 2D and the same resolution as the imagery')
# Make sure vza is 2D and the same size as latitude array
szvza = np.shape(vza)
andims = len(szvza)
    if szvza != szl or szvza[0] != sz[0] or szvza[1] != sz[1] or andims != 2:
raise IndexError('VZA array must be 2D and the same resolution as the imagery')
# Convert to radians if given in degrees
if np.nanmax(vza) > 2.0:
vza = vza*3.141592654/180.
print ' VZA provided in degrees. Converting to radians...'
# Define array for limb corrected brightness temperatures
correct = np.zeros_like(data)
# Prepare Julian day and latitudes
# NOTE: jday and lats were divided by 100 when coefficients were calculated,
# so the same is done here in order to retrieve the correct values.
ddd = float(jday)/100.
ttt = np.copy(lats)
badval = np.where( (np.abs(lats) > 90.) | (np.isnan(lats) == 1) )
ttt[badval] = np.nan
lats = None
# Calculate mean latitude per image line (to speed up processing)
if not highres:
ttt = np.nanmean(ttt, axis=1)
# Clip latitudes to [-72.5, 72.5], since coefficients are not defined beyond this range.
ttt[ttt >= 72.5] = 72.5
ttt[ttt <= -72.5] = -72.5
ttt = ttt/100.
# Limit VZA from 0 to 89 deg (0 to pi/2 rad)
vza[badval] = np.nan
vza[vza > 89.*np.pi/180.] = 89.*np.pi/180.
# Calculate vza-based factor array
factor = np.abs(np.log(np.cos(vza)))
print ' Min/Max VZA : %5.3f %5.3f radians\n' %(np.nanmin(vza), np.nanmax(vza))
factor[badval] = np.nan
vza = None
# Begin limb correction
print 'Performing limb correction...'
if highres: print 'High resolution correction: ON'
else: print 'High resolution correction: OFF'
for m in range(nbands):
print '...Band %02i...' %(bandorder[m])
# Make sure band is a supported thermal infrared band.
# If not, copy data without limb-correcting.
if sensor == 'abi' and (bandorder[m] < 7 or bandorder[m] >16):
correct[:,:,m] = data[:,:,m]
continue
if sensor == 'ahi' and (bandorder[m] < 7 or bandorder[m] >16):
correct[:,:,m] = data[:,:,m]
continue
if sensor == 'avhrr' and (bandorder[m] < 3 or bandorder[m] > 5):
correct[:,:,m] = data[:,:,m]
continue
if sensor == 'modis' and (bandorder[m] < 27 or bandorder[m] > 36) \
and (bandorder[m] != 20) :
correct[:,:,m] = data[:,:,m]
continue
if sensor == 'seviri' and (bandorder[m] < 4 or bandorder[m] >11):
correct[:,:,m] = data[:,:,m]
continue
if sensor == 'viirs' and (bandorder[m] < 12 or bandorder[m] > 16):
correct[:,:,m] = data[:,:,m]
continue
# Read lookup tables
# Version 2.0
try:
coeffAfile = os.path.join(tblpath,sensor,'%s_band%02i_Acoeff.txt' \
%(sensor,bandorder[m]))
coeffBfile = os.path.join(tblpath,sensor,'%s_band%02i_Bcoeff.txt' \
%(sensor,bandorder[m]))
ma = np.genfromtxt(coeffAfile, dtype=None, skip_header=1, autostrip=True)
mb = np.genfromtxt(coeffBfile, dtype=None, skip_header=1, autostrip=True)
if cldcorr:
cldfile = os.path.join(tblpath,sensor,'%s_band%02i_CLDscl.txt' \
%(sensor,bandorder[m]))
mc = np.genfromtxt(cldfile, dtype=None, skip_header=1, autostrip=True)
# For Version 1.0 compatibility
except IOError:
coeffAfile = os.path.join(tblpath,'%s_band%02i_Acoeff.txt' \
%(sensor,bandorder[m]))
coeffBfile = os.path.join(tblpath,'%s_band%02i_Bcoeff.txt' \
%(sensor,bandorder[m]))
ma = np.genfromtxt(coeffAfile, dtype=None, skip_header=1, autostrip=True)
mb = np.genfromtxt(coeffBfile, dtype=None, skip_header=1, autostrip=True)
if cldcorr:
cldfile = os.path.join(tblpath,'%s_band%02i_CLDscl.txt' \
%(sensor,bandorder[m]))
mc = np.genfromtxt(cldfile, dtype=None, skip_header=1, autostrip=True)
# Calculate correction coefficients
# Calculate C2 (tb_qcoeff) and C1 (tb_lcoeff)
start = time.clock()
tb_qcoeff, tb_lcoeff = calc_coeff(ma, mb, ddd, ttt)
print ' C2 range: [%5.3f, %5.3f]' %(np.nanmin(tb_qcoeff), np.nanmax(tb_qcoeff))
print ' C1 range: [%5.3f, %5.3f]' %(np.nanmin(tb_lcoeff), np.nanmax(tb_lcoeff))
# Calculate Q (cldfactor)
cldfactor = np.ones([lines,pixels],dtype=float)
if cldcorr:
for plev in range(np.shape(mc)[0]):
cf = calc_cld(mc[plev,:], ddd, ttt)
# Fill cldfactor array with appropriate Q values
for line in range(lines):
ind = np.where((ctp[line,:] >= lower[plev]) & (ctp[line,:] < upper[plev]))
if len(ind[0]) == 0: continue
try: cldfactor[line,ind] = cf[line]
except ValueError: cldfactor[line,ind] = cf[line,ind]
over = np.where(cldfactor > 1.0)
cldfactor[over] = 1.0
end = time.clock()
print ' Calculation time: %5.1f seconds\n' %(end-start)
# Get offset value for intercalibration:
tb_offset = 0.00 # tb_offset = 0.0 if value not found below
# Intercalibration to SEVIRI
if refsen == 'seviri':
# modis
if sensor == 'modis':
if satellite == 'aqua':
if bandorder[m] == 20: tb_offset = -2.25
if bandorder[m] == 27: tb_offset = -3.10
if bandorder[m] == 28: tb_offset = 0.10
if bandorder[m] == 29: tb_offset = -0.10
if bandorder[m] == 30: tb_offset = -1.60
if bandorder[m] == 31: tb_offset = 0.15
if bandorder[m] == 32: tb_offset = -0.30
if satellite == 'terra':
if bandorder[m] == 20: tb_offset = -2.25
if bandorder[m] == 27: tb_offset = 0.00
if bandorder[m] == 28: tb_offset = 0.48
if bandorder[m] == 29: tb_offset = -0.90
if bandorder[m] == 30: tb_offset = -1.55
if bandorder[m] == 31: tb_offset = 0.25
if bandorder[m] == 32: tb_offset = -0.30
# viirs-m
if sensor == 'viirs':
if satellite == 'snpp':
if bandorder[m] == 12: tb_offset = -3.00
if bandorder[m] == 14: tb_offset = 0.70
if bandorder[m] == 15: tb_offset = 0.00
if bandorder[m] == 16: tb_offset = -0.40
# avhrr
if sensor == 'avhrr':
if satellite == 'noaa-15' or satellite == 'noaa-18':
if bandorder[m] == 3: tb_offset = -3.00
if bandorder[m] == 4: tb_offset = -0.40
if bandorder[m] == 5: tb_offset = -0.20
if satellite == 'noaa-19':
if bandorder[m] == 3: tb_offset = -3.00
if bandorder[m] == 4: tb_offset = -0.50
if bandorder[m] == 5: tb_offset = -0.20
if satellite == 'metop-a':
if bandorder[m] == 3: tb_offset = -2.50
if bandorder[m] == 4: tb_offset = -0.20
if bandorder[m] == 5: tb_offset = -0.20
if satellite == 'metop-b':
if bandorder[m] == 3: tb_offset = -2.75
if bandorder[m] == 4: tb_offset = -0.30
if bandorder[m] == 5: tb_offset = -0.20
# Intercalibration to AHI
elif refsen == 'ahi' or refsen == 'abi':
# modis
if sensor == 'modis':
if satellite == 'aqua':
if bandorder[m] == 20: tb_offset = 0.00
if bandorder[m] == 27: tb_offset = -3.80
if bandorder[m] == 28: tb_offset = 0.20
if bandorder[m] == 29: tb_offset = 0.00
if bandorder[m] == 30: tb_offset = -3.40
if bandorder[m] == 31: tb_offset = 0.70
if bandorder[m] == 32: tb_offset = 0.00
if satellite == 'terra':
if bandorder[m] == 20: tb_offset = 0.00
if bandorder[m] == 27: tb_offset = -3.80
if bandorder[m] == 28: tb_offset = 0.20
if bandorder[m] == 29: tb_offset = 0.00
if bandorder[m] == 30: tb_offset = -3.40
if bandorder[m] == 31: tb_offset = 0.70
if bandorder[m] == 32: tb_offset = 0.00
# viirs-m - not yet calculated for AHI
if sensor == 'viirs':
if satellite == 'snpp':
if bandorder[m] == 12: tb_offset = 0.00
if bandorder[m] == 14: tb_offset = 0.00
if bandorder[m] == 15: tb_offset = 0.00
if bandorder[m] == 16: tb_offset = 0.00
# avhrr
if sensor == 'avhrr':
if satellite == 'noaa-15' or satellite == 'noaa-18':
if bandorder[m] == 3: tb_offset = -0.20
if bandorder[m] == 4: tb_offset = 0.00
if bandorder[m] == 5: tb_offset = -2.20
if satellite == 'noaa-19':
if bandorder[m] == 3: tb_offset = -0.20
if bandorder[m] == 4: tb_offset = -0.20
if bandorder[m] == 5: tb_offset = -2.20
if satellite == 'metop-a':
if bandorder[m] == 3: tb_offset = -0.20
if bandorder[m] == 4: tb_offset = -0.20
if bandorder[m] == 5: tb_offset = -2.75
if satellite == 'metop-b':
if bandorder[m] == 3: tb_offset = -0.20
if bandorder[m] == 4: tb_offset = -0.20
if bandorder[m] == 5: tb_offset = -2.50
# Expand dimensions of correction coefficient arrays to match data if
# highres correction was not done
if not highres:
tb_qcoeff = np.expand_dims(tb_qcoeff, 2)
tb_qcoeff = np.tile(tb_qcoeff, (1,sz[1]))
tb_lcoeff = np.expand_dims(tb_lcoeff, 2)
tb_lcoeff = np.tile(tb_lcoeff, (1,sz[1]))
# Calculate corrected brightness temperatures using Eqn. 6 from Elmer et al. (2016)
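        # Restating the computation below in symbols (code variables, not the
        # paper's exact notation): Tb_corr = (Tb + tb_offset)
        #   + cldfactor * (tb_lcoeff*factor + tb_qcoeff*factor**2),
        # where factor = |ln(cos(VZA))| and cldfactor is 1 for clear-sky pixels.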
corr = (np.squeeze(data[:,:,m]) + tb_offset) + \
cldfactor*(tb_lcoeff*factor + tb_qcoeff*(factor**2))
corr[badval]=fillval
correct[:,:,m] = corr
    print 'Done!'
    return np.squeeze(correct)
|
{
"content_hash": "da48058157bc642519a14e94f13eb667",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 99,
"avg_line_length": 39.345454545454544,
"alnum_prop": 0.5979667282809612,
"repo_name": "njelmer/limbcorr",
"id": "c14921ac80daee7477334f195841772f81df1484",
"size": "19501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limbcorr_v2.0/nje_limbcorr_v2.0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40178"
},
{
"name": "Roff",
"bytes": "3115"
}
],
"symlink_target": ""
}
|
import platform
import socket
import subprocess
PORT_TIMEOUT = 5
PING_TIMEOUT = 5
def is_port_open(host: str, port: int) -> bool:
"""
Test if a given port in a host is open.
"""
# pylint: disable=invalid-name
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(PORT_TIMEOUT)
try:
s.connect((host, port))
s.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
finally:
s.close()
def is_hostname_valid(host: str) -> bool:
"""
Test if a given hostname can be resolved.
"""
try:
socket.gethostbyname(host)
return True
except socket.gaierror:
return False
def is_host_up(host: str) -> bool:
"""
Ping a host to see if it's up.
Note that if we don't get a response the host might still be up,
since many firewalls block ICMP packets.
"""
param = "-n" if platform.system().lower() == "windows" else "-c"
command = ["ping", param, "1", host]
try:
output = subprocess.call(command, timeout=PING_TIMEOUT)
except subprocess.TimeoutExpired:
return False
return output == 0
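# Minimal usage sketch (hypothetical host name and port, for illustration only):
#   if is_hostname_valid("db.example.com") and is_port_open("db.example.com", 5432):
#       pass  # safe to attempt a real connection
#   up = is_host_up("db.example.com")  # may be False for a live host if ICMP is filtered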
|
{
"content_hash": "0ed0a856bbc8f25507624dd9b680df71",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 68,
"avg_line_length": 23.137254901960784,
"alnum_prop": 0.6127118644067797,
"repo_name": "zhouyao1994/incubator-superset",
"id": "8079a96e203b1d94128f78d3b2c505ef194df0ae",
"size": "1965",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "superset/utils/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4776"
},
{
"name": "Dockerfile",
"bytes": "6940"
},
{
"name": "HTML",
"bytes": "1243911"
},
{
"name": "JavaScript",
"bytes": "2445349"
},
{
"name": "Jinja",
"bytes": "5542"
},
{
"name": "Jupyter Notebook",
"bytes": "1925627"
},
{
"name": "Less",
"bytes": "106438"
},
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Pug",
"bytes": "2969"
},
{
"name": "Python",
"bytes": "6296253"
},
{
"name": "Shell",
"bytes": "56211"
},
{
"name": "Smarty",
"bytes": "4298"
},
{
"name": "TypeScript",
"bytes": "6909337"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from fabric import colors
from lib.base import (
BGPContainer,
CmdBuffer,
try_several_times,
wait_for_completion,
)
class ExaBGPContainer(BGPContainer):
SHARED_VOLUME = '/shared_volume'
PID_FILE = '/var/run/exabgp.pid'
def __init__(self, name, asn, router_id, ctn_image_name='osrg/exabgp:4.0.5'):
super(ExaBGPContainer, self).__init__(name, asn, router_id, ctn_image_name)
self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
def _pre_start_exabgp(self):
# Create named pipes for "exabgpcli"
named_pipes = '/run/exabgp.in /run/exabgp.out'
self.local('mkfifo {0}'.format(named_pipes), capture=True)
self.local('chmod 777 {0}'.format(named_pipes), capture=True)
def _start_exabgp(self):
cmd = CmdBuffer(' ')
cmd << 'env exabgp.log.destination={0}/exabgpd.log'.format(self.SHARED_VOLUME)
cmd << 'exabgp.daemon.user=root'
cmd << 'exabgp.daemon.pid={0}'.format(self.PID_FILE)
cmd << 'exabgp.tcp.bind="0.0.0.0" exabgp.tcp.port=179'
cmd << 'exabgp {0}/exabgpd.conf'.format(self.SHARED_VOLUME)
self.local(str(cmd), detach=True)
def _wait_for_boot(self):
def _f():
ret = self.local('exabgpcli version > /dev/null 2>&1; echo $?', capture=True)
return ret == '0'
return wait_for_completion(_f)
def run(self):
super(ExaBGPContainer, self).run()
self._pre_start_exabgp()
        # ExaBGP requires neighbor settings to be configured before it can
        # start, so it is not started here yet.
# self._start_exabgp()
return self.WAIT_FOR_BOOT
def create_config(self):
# Manpage of exabgp.conf(5):
# https://github.com/Exa-Networks/exabgp/blob/master/doc/man/exabgp.conf.5
cmd = CmdBuffer('\n')
for peer, info in self.peers.iteritems():
cmd << 'neighbor {0} {{'.format(info['neigh_addr'].split('/')[0])
cmd << ' router-id {0};'.format(self.router_id)
cmd << ' local-address {0};'.format(info['local_addr'].split('/')[0])
cmd << ' local-as {0};'.format(self.asn)
cmd << ' peer-as {0};'.format(peer.asn)
caps = []
if info['as2']:
caps.append(' asn4 disable;')
if info['addpath']:
caps.append(' add-path send/receive;')
if caps:
cmd << ' capability {'
for cap in caps:
cmd << cap
cmd << ' }'
if info['passwd']:
cmd << ' md5-password "{0}";'.format(info['passwd'])
if info['passive']:
cmd << ' passive;'
cmd << '}'
with open('{0}/exabgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new exabgpd.conf]'.format(self.name))
print colors.yellow(str(cmd))
f.write(str(cmd))
def _is_running(self):
ret = self.local("test -f {0}; echo $?".format(self.PID_FILE), capture=True)
return ret == '0'
def reload_config(self):
if not self.peers:
return
def _reload():
if self._is_running():
self.local('/usr/bin/pkill --pidfile {0} && rm -f {0}'.format(self.PID_FILE), capture=True)
else:
self._start_exabgp()
self._wait_for_boot()
if not self._is_running():
raise RuntimeError('Could not start ExaBGP')
try_several_times(_reload)
def _construct_ip_unicast(self, path):
cmd = CmdBuffer(' ')
cmd << str(path['prefix'])
if path['next-hop']:
cmd << 'next-hop {0}'.format(path['next-hop'])
else:
cmd << 'next-hop self'
return str(cmd)
def _construct_flowspec(self, path):
cmd = CmdBuffer(' ')
cmd << '{ match {'
for match in path['matchs']:
cmd << '{0};'.format(match)
cmd << '} then {'
for then in path['thens']:
cmd << '{0};'.format(then)
cmd << '} }'
return str(cmd)
def _construct_path_attributes(self, path):
cmd = CmdBuffer(' ')
if path['as-path']:
cmd << 'as-path [{0}]'.format(' '.join(str(i) for i in path['as-path']))
if path['med']:
cmd << 'med {0}'.format(path['med'])
if path['local-pref']:
cmd << 'local-preference {0}'.format(path['local-pref'])
if path['community']:
cmd << 'community [{0}]'.format(' '.join(c for c in path['community']))
if path['extended-community']:
cmd << 'extended-community [{0}]'.format(path['extended-community'])
if path['attr']:
cmd << 'attribute [ {0} ]'.format(path['attr'])
return str(cmd)
def _construct_path(self, path, rf='ipv4', is_withdraw=False):
cmd = CmdBuffer(' ')
if rf in ['ipv4', 'ipv6']:
cmd << 'route'
cmd << self._construct_ip_unicast(path)
elif rf in ['ipv4-flowspec', 'ipv6-flowspec']:
cmd << 'flow route'
cmd << self._construct_flowspec(path)
else:
raise ValueError('unsupported address family: %s' % rf)
if path['identifier']:
cmd << 'path-information {0}'.format(path['identifier'])
if not is_withdraw:
# Withdrawal should not require path attributes
cmd << self._construct_path_attributes(path)
return str(cmd)
def add_route(self, route, rf='ipv4', attribute=None, aspath=None,
community=None, med=None, extendedcommunity=None,
nexthop=None, matchs=None, thens=None,
local_pref=None, identifier=None, reload_config=False):
if not self._is_running():
raise RuntimeError('ExaBGP is not yet running')
self.routes.setdefault(route, [])
path = {
'prefix': route,
'rf': rf,
'attr': attribute,
'next-hop': nexthop,
'as-path': aspath,
'community': community,
'med': med,
'local-pref': local_pref,
'extended-community': extendedcommunity,
'identifier': identifier,
'matchs': matchs,
'thens': thens,
}
cmd = CmdBuffer(' ')
cmd << "exabgpcli 'announce"
cmd << self._construct_path(path, rf=rf)
cmd << "'"
self.local(str(cmd), capture=True)
self.routes[route].append(path)
def del_route(self, route, identifier=None, reload_config=False):
if not self._is_running():
raise RuntimeError('ExaBGP is not yet running')
path = None
new_paths = []
for p in self.routes.get(route, []):
if p['identifier'] != identifier:
new_paths.append(p)
else:
path = p
if not path:
return
rf = path['rf']
cmd = CmdBuffer(' ')
cmd << "exabgpcli 'withdraw"
cmd << self._construct_path(path, rf=rf, is_withdraw=True)
cmd << "'"
self.local(str(cmd), capture=True)
self.routes[route] = new_paths
def _get_adj_rib(self, peer, rf, in_out='in'):
# IPv4 Unicast:
# neighbor 172.17.0.2 ipv4 unicast 192.168.100.0/24 path-information 0.0.0.20 next-hop self
# IPv6 FlowSpec:
# neighbor 172.17.0.2 ipv6 flow flow destination-ipv6 2002:1::/64/0 source-ipv6 2002:2::/64/0 next-header =udp flow-label >100
rf_map = {
'ipv4': ['ipv4', 'unicast'],
'ipv6': ['ipv6', 'unicast'],
'ipv4-flowspec': ['ipv4', 'flow'],
'ipv6-flowspec': ['ipv6', 'flow'],
}
assert rf in rf_map
assert in_out in ('in', 'out')
peer_addr = self.peer_name(peer)
lines = self.local('exabgpcli show adj-rib {0}'.format(in_out), capture=True).split('\n')
# rib = {
# <nlri>: [
# {
# 'nlri': <nlri>,
# 'next-hop': <next-hop>,
# ...
# },
# ...
# ],
# }
rib = {}
for line in lines:
if not line:
continue
values = line.split()
if peer_addr != values[1]:
continue
elif rf is not None and rf_map[rf] != values[2:4]:
continue
if rf in ('ipv4', 'ipv6'):
nlri = values[4]
rib.setdefault(nlri, [])
path = {k: v for k, v in zip(*[iter(values[5:])] * 2)}
path['nlri'] = nlri
rib[nlri].append(path)
elif rf in ('ipv4-flowspec', 'ipv6-flowspec'):
# XXX: Missing path attributes?
nlri = ' '.join(values[5:])
rib.setdefault(nlri, [])
path = {'nlri': nlri}
rib[nlri].append(path)
return rib
def get_adj_rib_in(self, peer, rf='ipv4'):
return self._get_adj_rib(peer, rf, 'in')
def get_adj_rib_out(self, peer, rf='ipv4'):
return self._get_adj_rib(peer, rf, 'out')
class RawExaBGPContainer(ExaBGPContainer):
def __init__(self, name, config, ctn_image_name='osrg/exabgp',
exabgp_path=''):
asn = None
router_id = None
for line in config.split('\n'):
line = line.strip()
if line.startswith('local-as'):
asn = int(line[len('local-as'):].strip('; '))
if line.startswith('router-id'):
router_id = line[len('router-id'):].strip('; ')
if not asn:
raise Exception('asn not in exabgp config')
if not router_id:
raise Exception('router-id not in exabgp config')
self.config = config
        super(RawExaBGPContainer, self).__init__(name, asn, router_id,
                                                 ctn_image_name)
def create_config(self):
with open('{0}/exabgpd.conf'.format(self.config_dir), 'w') as f:
print colors.yellow('[{0}\'s new exabgpd.conf]'.format(self.name))
print colors.yellow(self.config)
f.write(self.config)
|
{
"content_hash": "0d944b88aa7b8a002b370eac15f7ae72",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 134,
"avg_line_length": 35.432432432432435,
"alnum_prop": 0.5069603356216629,
"repo_name": "tamihiro/gobgp",
"id": "1326dd86382988f1a8ce17fc802a5aefac9317c0",
"size": "11101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/lib/exabgp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2761"
},
{
"name": "Go",
"bytes": "2057102"
},
{
"name": "Makefile",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "585184"
},
{
"name": "Shell",
"bytes": "90184"
}
],
"symlink_target": ""
}
|
"""
pyexcel_io.database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
database data importer and exporter
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from pyexcel_io.plugins import IOPluginInfoChain
from pyexcel_io.constants import DB_DJANGO, DB_SQL
IOPluginInfoChain(__name__).add_a_reader(
relative_plugin_class_path='exporters.django.DjangoBookReader',
file_types=[DB_DJANGO]
).add_a_reader(
relative_plugin_class_path='exporters.sqlalchemy.SQLBookReader',
file_types=[DB_SQL],
).add_a_writer(
relative_plugin_class_path='importers.django.DjangoBookWriter',
file_types=[DB_DJANGO],
).add_a_writer(
relative_plugin_class_path='importers.sqlalchemy.SQLBookWriter',
file_types=[DB_SQL]
)
|
{
"content_hash": "5f598b529977d6cbd4a19f7561657ec9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 30.653846153846153,
"alnum_prop": 0.6925972396486826,
"repo_name": "caspartse/QQ-Groups-Spider",
"id": "01357e47de148709fbe3497070ff947f9398918f",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pyexcel_io/database/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157970"
},
{
"name": "Python",
"bytes": "10416"
},
{
"name": "Smarty",
"bytes": "9490"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from storages.backends.s3boto import S3BotoStorage
from django.contrib.staticfiles.storage import ManifestFilesMixin
from toolware.utils.generic import get_domain
from .. import defaults as defs
class MediaFilesStorage(S3BotoStorage):
"""
Custom S3 storage for uploaded assets. (any file type)
"""
def __init__(self, *args, **kwargs):
kwargs['bucket'] = settings.AWS_MEDIA_BUCKET_NAME
kwargs['location'] = settings.MEDIA_ASSETS_PREFIX
kwargs['custom_domain'] = get_domain(settings.AWS_MEDIA_CDN)
super().__init__(*args, **kwargs)
class StaticFilesStorage(S3BotoStorage):
"""
Custom S3 storage for static assets. (any file type)
"""
def __init__(self, *args, **kwargs):
kwargs['bucket'] = settings.AWS_STATIC_BUCKET_NAME
kwargs['location'] = settings.STATIC_ASSETS_PREFIX
kwargs['custom_domain'] = get_domain(settings.AWS_STATIC_CDN)
super().__init__(*args, **kwargs)
class ManifestStaticFilesStorage(ManifestFilesMixin, S3BotoStorage):
"""
Custom S3 storage for manifest static assets. (any file type).
"""
def __init__(self, *args, **kwargs):
kwargs['bucket'] = settings.AWS_STATIC_BUCKET_NAME
kwargs['location'] = settings.STATIC_ASSETS_PREFIX
kwargs['custom_domain'] = get_domain(settings.AWS_STATIC_CDN)
super().__init__(*args, **kwargs)
@property
def manifest_name(self):
filename = 'staticfiles-{}.json'.format(defs.MANIFEST_STATIC_FILE_VERSION)
return filename
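# Hypothetical wiring sketch (module path assumed from this file's location in
# the repo, not confirmed): Django would normally select these backends via its
# standard storage settings, e.g.
#   DEFAULT_FILE_STORAGE = 'apps.sitewide.storage.s3.MediaFilesStorage'
#   STATICFILES_STORAGE = 'apps.sitewide.storage.s3.ManifestStaticFilesStorage'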
|
{
"content_hash": "1cd88d06c930f834239d9a38cd828137",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 82,
"avg_line_length": 34.32608695652174,
"alnum_prop": 0.6681443951868271,
"repo_name": "un33k/djangoware",
"id": "4826883ed07e27d2452a65b5025ebfe8a56fa7b6",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/sitewide/storage/s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6198"
},
{
"name": "Python",
"bytes": "109976"
},
{
"name": "Shell",
"bytes": "8545"
}
],
"symlink_target": ""
}
|
__all__ = ['arc','arrow','axis','colorbar','compass','contour','default',
'getlimits','halftone','legend','plot','polygon','rgb','rect',
'regionfile','savefig','stick','subplot','subplot2','text','title',
'vector','vectorkey','xlabel','ylabel']
from arc import arc
from arrow import arrow
from axis import axis
from colorbar import colorbar
from compass import compass
from contour import contour
from default import default
from getlimits import getlimits
from halftone import halftone
from legend import legend
from plot import plot
from polygon import polygon
from rgb import rgb
from rect import rect
from regionfile import regionfile
from savefig import savefig
from stick import stick
from subplot import subplot
from subplot2 import subplot2
from text import text
from title import title
from vector import vector
from vectorkey import vectorkey
from xlabel import xlabel
from ylabel import ylabel
|
{
"content_hash": "0b3fb8a7f6197361e6084012ea06f5df",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 31.3,
"alnum_prop": 0.7646432374866879,
"repo_name": "nlchap0/nlcpython",
"id": "4ca832f76f1f2eeeaf71a1b1942aac859c9d13b3",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlcplot/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20572"
},
{
"name": "Python",
"bytes": "435025"
},
{
"name": "TeX",
"bytes": "25106"
}
],
"symlink_target": ""
}
|
"""
Helper script for starting/stopping/reloading Glance server programs.
Thanks for some of the code, Swifties ;)
"""
from __future__ import print_function
from __future__ import with_statement
import argparse
import fcntl
import os
import resource
import signal
import subprocess
import sys
import tempfile
import time
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from six.moves import xrange
from glance.common import config
from glance.openstack.common import units
CONF = cfg.CONF
ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart',
'reload', 'force-reload']
ALL_SERVERS = ['api', 'registry', 'scrubber']
GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-registry',
'glance-scrubber']
MAX_DESCRIPTORS = 32768
MAX_MEMORY = 2 * units.Gi # 2 GB
USAGE = """%(prog)s [options] <SERVER> <COMMAND> [CONFPATH]
Where <SERVER> is one of:
all, {0}
And command is one of:
{1}
And CONFPATH is the optional configuration file to use.""".\
format(', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS))
exitcode = 0
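# `gated_by` below is a small conditional decorator: the wrapped function runs
# only when `predicate` (typically a CLI option such as CONF.respawn or
# CONF.await_child) is truthy; otherwise the call is a no-op returning None.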
def gated_by(predicate):
def wrap(f):
def wrapped_f(*args):
if predicate:
return f(*args)
else:
return None
return wrapped_f
return wrap
def pid_files(server, pid_file):
pid_files = []
if pid_file:
if os.path.exists(os.path.abspath(pid_file)):
pid_files = [os.path.abspath(pid_file)]
else:
if os.path.exists('/var/run/glance/%s.pid' % server):
pid_files = ['/var/run/glance/%s.pid' % server]
for pid_file in pid_files:
pid = int(open(pid_file).read().strip())
yield pid_file, pid
def do_start(verb, pid_file, server, args):
if verb != 'Respawn' and pid_file == CONF.pid_file:
for pid_file, pid in pid_files(server, pid_file):
if os.path.exists('/proc/%s' % pid):
print(_("%(serv)s appears to already be running: %(pid)s") %
{'serv': server, 'pid': pid_file})
return
else:
print(_("Removing stale pid file %s") % pid_file)
os.unlink(pid_file)
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print(_('Unable to increase file descriptor limit. '
'Running as non-root?'))
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
def write_pid_file(pid_file, pid):
with open(pid_file, 'w') as fp:
fp.write('%d\n' % pid)
def redirect_to_null(fds):
with open(os.devnull, 'r+b') as nullfile:
for desc in fds: # close fds
try:
os.dup2(nullfile.fileno(), desc)
except OSError:
pass
def redirect_to_syslog(fds, server):
log_cmd = 'logger'
log_cmd_params = '-t "%s[%d]"' % (server, os.getpid())
process = subprocess.Popen([log_cmd, log_cmd_params],
stdin=subprocess.PIPE)
for desc in fds: # pipe to logger command
try:
os.dup2(process.stdin.fileno(), desc)
except OSError:
pass
def redirect_stdio(server, capture_output):
input = [sys.stdin.fileno()]
output = [sys.stdout.fileno(), sys.stderr.fileno()]
redirect_to_null(input)
if capture_output:
redirect_to_syslog(output, server)
else:
redirect_to_null(output)
@gated_by(CONF.capture_output)
def close_stdio_on_exec():
fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()]
for desc in fds: # set close on exec flag
fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
def launch(pid_file, conf_file=None, capture_output=False, await_time=0):
args = [server]
msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server})
if conf_file:
args += ['--config-file', conf_file]
            msg += ' with %s' % conf_file
print(msg)
close_stdio_on_exec()
pid = os.fork()
if pid == 0:
os.setsid()
redirect_stdio(server, capture_output)
try:
os.execlp('%s' % server, *args)
except OSError as e:
msg = (_('unable to launch %(serv)s. Got error: %(e)s') %
{'serv': server, 'e': e})
sys.exit(msg)
sys.exit(0)
else:
write_pid_file(pid_file, pid)
await_child(pid, await_time)
return pid
@gated_by(CONF.await_child)
def await_child(pid, await_time):
bail_time = time.time() + await_time
while time.time() < bail_time:
reported_pid, status = os.waitpid(pid, os.WNOHANG)
if reported_pid == pid:
global exitcode
exitcode = os.WEXITSTATUS(status)
break
time.sleep(0.05)
conf_file = None
if args and os.path.exists(args[0]):
conf_file = os.path.abspath(os.path.expanduser(args[0]))
return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child)
def do_check_status(pid_file, server):
if os.path.exists(pid_file):
with open(pid_file, 'r') as pidfile:
pid = pidfile.read().strip()
print(_("%(serv)s (pid %(pid)s) is running...") %
{'serv': server, 'pid': pid})
else:
print(_("%s is stopped") % server)
def get_pid_file(server, pid_file):
pid_file = (os.path.abspath(pid_file) if pid_file else
'/var/run/glance/%s.pid' % server)
dir, file = os.path.split(pid_file)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
if not os.access(dir, os.W_OK):
fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server)
msg = (_('Unable to create pid file %(pid)s. Running as non-root?\n'
'Falling back to a temp file, you can stop %(service)s '
'service using:\n'
' %(file)s %(server)s stop --pid-file %(fb)s') %
{'pid': pid_file,
'service': server,
'file': __file__,
'server': server,
'fb': fallback})
print(msg)
pid_file = fallback
return pid_file
def do_stop(server, args, graceful=False):
if graceful and server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
did_anything = False
pfiles = pid_files(server, CONF.pid_file)
for pid_file, pid in pfiles:
did_anything = True
try:
os.unlink(pid_file)
except OSError:
pass
try:
print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)')
% {'serv': server, 'pid': pid, 'sig': sig})
os.kill(pid, sig)
except OSError:
print(_("Process %d not running") % pid)
for pid_file, pid in pfiles:
for _junk in xrange(150): # 15 seconds
if not os.path.exists('/proc/%s' % pid):
break
time.sleep(0.1)
else:
print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;'
' giving up') % {'pid': pid, 'file': pid_file})
if not did_anything:
print(_('%s is already stopped') % server)
def add_command_parsers(subparsers):
cmd_parser = argparse.ArgumentParser(add_help=False)
cmd_subparsers = cmd_parser.add_subparsers(dest='command')
for cmd in ALL_COMMANDS:
parser = cmd_subparsers.add_parser(cmd)
parser.add_argument('args', nargs=argparse.REMAINDER)
for server in ALL_SERVERS:
full_name = 'glance-' + server
parser = subparsers.add_parser(server, parents=[cmd_parser])
parser.set_defaults(servers=[full_name])
parser = subparsers.add_parser(full_name, parents=[cmd_parser])
parser.set_defaults(servers=[full_name])
parser = subparsers.add_parser('all', parents=[cmd_parser])
parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS])
def main():
global exitcode
opts = [
cfg.SubCommandOpt('server',
title='Server types',
help='Available server types',
handler=add_command_parsers),
cfg.StrOpt('pid-file',
metavar='PATH',
help='File to use as pid file. Default: '
'/var/run/glance/$server.pid.'),
cfg.IntOpt('await-child',
metavar='DELAY',
default=0,
help='Period to wait for service death '
'in order to report exit code '
'(default is to not wait at all).'),
cfg.BoolOpt('capture-output',
default=False,
help='Capture stdout/err in syslog '
'instead of discarding it.'),
cfg.BoolOpt('respawn',
default=False,
help='Restart service on unexpected death.'),
]
CONF.register_cli_opts(opts)
config.parse_args(usage=USAGE)
@gated_by(CONF.await_child)
@gated_by(CONF.respawn)
def mutually_exclusive():
        sys.stderr.write('--await-child and --respawn are mutually exclusive\n')
sys.exit(1)
mutually_exclusive()
@gated_by(CONF.respawn)
def anticipate_respawn(children):
while children:
pid, status = os.wait()
if pid in children:
(pid_file, server, args) = children.pop(pid)
running = os.path.exists(pid_file)
one_second_ago = time.time() - 1
bouncing = (running and
os.path.getmtime(pid_file) >= one_second_ago)
if running and not bouncing:
args = (pid_file, server, args)
new_pid = do_start('Respawn', *args)
children[new_pid] = args
else:
rsn = 'bouncing' if bouncing else 'deliberately stopped'
print(_('Suppressed respawn as %(serv)s was %(rsn)s.')
% {'serv': server, 'rsn': rsn})
if CONF.server.command == 'start':
children = {}
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
args = (pid_file, server, CONF.server.args)
pid = do_start('Start', *args)
children[pid] = args
anticipate_respawn(children)
if CONF.server.command == 'status':
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
do_check_status(pid_file, server)
if CONF.server.command == 'stop':
for server in CONF.server.servers:
do_stop(server, CONF.server.args)
if CONF.server.command == 'shutdown':
for server in CONF.server.servers:
do_stop(server, CONF.server.args, graceful=True)
if CONF.server.command == 'restart':
for server in CONF.server.servers:
do_stop(server, CONF.server.args)
for server in CONF.server.servers:
pid_file = get_pid_file(server, CONF.pid_file)
do_start('Restart', pid_file, server, CONF.server.args)
if (CONF.server.command == 'reload' or
CONF.server.command == 'force-reload'):
for server in CONF.server.servers:
do_stop(server, CONF.server.args, graceful=True)
pid_file = get_pid_file(server, CONF.pid_file)
do_start('Restart', pid_file, server, CONF.server.args)
sys.exit(exitcode)
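# Illustrative sketch (assumption, not this project's code): gated_by above is used as a
# decorator factory that turns the decorated function into a no-op unless the gating
# config value is truthy. A minimal implementation of that shape would look like:
#
#     def gated_by(predicate):
#         def wrap(f):
#             def wrapped_f(*args, **kwargs):
#                 if predicate:
#                     return f(*args, **kwargs)
#             return wrapped_f
#         return wrap
#
# The real definition lives earlier in this file and may differ in detail.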
|
{
"content_hash": "45f374e15097f3bb9e647226ff0d33af",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 78,
"avg_line_length": 33.725067385444746,
"alnum_prop": 0.5417998721227621,
"repo_name": "redhat-openstack/glance",
"id": "e388362c4a6e9332a1cf074141f3bffc74af8e1c",
"size": "13102",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "glance/cmd/control.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "12183"
},
{
"name": "Python",
"bytes": "3304893"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
}
|
import httplib
import time
import base64
import urllib
import StringIO
import sys
import socket
# os and md5 are used below (file seeks, Content-MD5); import them explicitly rather
# than relying on the star imports to provide them.
import os
import md5
try:
from oss.oss_util import *
except:
from oss_util import *
try:
from oss.oss_xml_handler import *
except:
from oss_xml_handler import *
class OssAPI:
'''
A simple OSS API
'''
DefaultContentType = 'application/octet-stream'
provider = PROVIDER
__version__ = '0.3.5'
Version = __version__
AGENT = 'oss-python%s (%s)' % (__version__, sys.platform)
def __init__(self, host='oss.aliyuncs.com', access_id='', secret_access_key='', port=80, is_security=False):
self.SendBufferSize = 8192
self.RecvBufferSize = 1024*1024*10
self.host = get_host_from_list(host)
self.port = port
self.access_id = access_id
self.secret_access_key = secret_access_key
self.show_bar = False
self.is_security = is_security
self.retry_times = 5
self.agent = self.AGENT
self.debug = False
self.timeout = 60
def set_timeout(self, timeout):
self.timeout = timeout
def set_debug(self, is_debug):
if is_debug:
self.debug = True
def set_retry_times(self, retry_times=5):
self.retry_times = retry_times
def set_send_buf_size(self, buf_size):
try:
self.SendBufferSize = (int)(buf_size)
except ValueError:
pass
def set_recv_buf_size(self, buf_size):
try:
self.RecvBufferSize = (int)(buf_size)
except ValueError:
pass
def get_connection(self, tmp_host=None):
host = ''
port = 80
if not tmp_host:
tmp_host = self.host
host_port_list = tmp_host.split(":")
if len(host_port_list) == 1:
host = host_port_list[0].strip()
elif len(host_port_list) == 2:
host = host_port_list[0].strip()
port = int(host_port_list[1].strip())
if self.is_security or port == 443:
self.is_security = True
if sys.version_info >= (2, 6):
return httplib.HTTPSConnection(host=host, port=port, timeout=self.timeout)
else:
#XXX
if not (socket.getdefaulttimeout()):
socket.setdefaulttimeout(120)
return httplib.HTTPSConnection(host=host, port=port)
else:
if sys.version_info >= (2, 6):
return httplib.HTTPConnection(host=host, port=port, timeout=self.timeout)
else:
#XXX
if not (socket.getdefaulttimeout()):
socket.setdefaulttimeout(120)
return httplib.HTTPConnection(host=host, port=port)
def sign_url_auth_with_expire_time(self, method, url, headers=None, resource="/", timeout=60, params=None):
'''
Create the authorization for OSS based on the input method, url, body and headers
:type method: string
:param method: one of PUT, GET, DELETE, HEAD
:type url: string
:param:HTTP address of bucket or object, eg: http://HOST/bucket/object
:type headers: dict
:param: HTTP header
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
:type timeout: int
:param
Returns:
signature url.
'''
if not headers:
headers = {}
if not params:
params = {}
send_time = str(int(time.time()) + timeout)
headers['Date'] = send_time
auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug)
params["OSSAccessKeyId"] = self.access_id
params["Expires"] = str(send_time)
params["Signature"] = auth_value
sign_url = append_param(url, params)
return sign_url
def sign_url(self, method, bucket, object, timeout=60, headers=None, params=None):
'''
Create the authorization for OSS based on the input method, url, body and headers
:type method: string
:param method: one of PUT, GET, DELETE, HEAD
:type bucket: string
:param:
:type object: string
:param:
:type timeout: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
Returns:
signature url.
'''
if not headers:
headers = {}
if not params:
params = {}
send_time = str(int(time.time()) + timeout)
headers['Date'] = send_time
object = convert_utf8(object)
resource = "/%s/%s%s" % (bucket, object, get_resource(params))
auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug)
params["OSSAccessKeyId"] = self.access_id
params["Expires"] = str(send_time)
params["Signature"] = auth_value
url = ''
object = oss_quote(object)
http = "http"
if self.is_security:
http = "https"
if is_ip(self.host):
url = "%s://%s/%s/%s" % (http, self.host, bucket, object)
elif is_oss_host(self.host):
if check_bucket_valid(bucket):
url = "%s://%s.%s/%s" % (http, bucket, self.host, object)
else:
url = "%s://%s/%s/%s" % (http, self.host, bucket, object)
else:
url = "%s://%s/%s" % (http, self.host, object)
sign_url = append_param(url, params)
return sign_url
def _create_sign_for_normal_auth(self, method, headers=None, resource="/"):
'''
NOT public API
Create the authorization for OSS based on header input.
it should be put into "Authorization" parameter of header.
:type method: string
:param:one of PUT, GET, DELETE, HEAD
:type headers: dict
:param: HTTP header
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
Returns:
signature string
'''
auth_value = "%s %s:%s" % (self.provider, self.access_id, get_assign(self.secret_access_key, method, headers, resource, None, self.debug))
return auth_value
def bucket_operation(self, method, bucket, headers=None, params=None):
return self.http_request(method, bucket, '', headers, '', params)
def object_operation(self, method, bucket, object, headers=None, body='', params=None):
return self.http_request(method, bucket, object, headers, body, params)
def http_request(self, method, bucket, object, headers=None, body='', params=None):
'''
Send http request of operation
:type method: string
:param method: one of PUT, GET, DELETE, HEAD, POST
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
:type body: string
:param
Returns:
HTTP Response
'''
retry = 5
res = None
while retry > 0:
retry -= 1
tmp_bucket = bucket
tmp_object = object
tmp_headers = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
tmp_params = {}
if params and isinstance(params, dict):
tmp_params = params.copy()
res = self.http_request_with_redirect(method, tmp_bucket, tmp_object, tmp_headers, body, tmp_params)
if check_redirect(res):
self.host = helper_get_host_from_resp(res, bucket)
else:
return res
return res
def http_request_with_redirect(self, method, bucket, object, headers=None, body='', params=None):
'''
Send http request of operation
:type method: string
:param method: one of PUT, GET, DELETE, HEAD, POST
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
:type body: string
:param
Returns:
HTTP Response
'''
if not params:
params = {}
if not headers:
headers = {}
object = convert_utf8(object)
if not bucket:
resource = "/"
headers['Host'] = self.host
else:
headers['Host'] = "%s.%s" % (bucket, self.host)
if not is_oss_host(self.host):
headers['Host'] = self.host
resource = "/%s/" % bucket
resource = convert_utf8(resource)
resource = "%s%s%s" % (resource, object, get_resource(params))
object = oss_quote(object)
url = "/%s" % object
if is_ip(self.host):
url = "/%s/%s" % (bucket, object)
if not bucket:
url = "/%s" % object
headers['Host'] = self.host
url = append_param(url, params)
date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
headers['Date'] = date
headers['Authorization'] = self._create_sign_for_normal_auth(method, headers, resource)
headers['User-Agent'] = self.agent
if check_bucket_valid(bucket) and not is_ip(self.host):
conn = self.get_connection(headers['Host'])
else:
conn = self.get_connection()
conn.request(method, url, body, headers)
return conn.getresponse()
def get_service(self, headers=None, prefix='', marker='', maxKeys=''):
'''
List all buckets of user
'''
return self.list_all_my_buckets(headers, prefix, marker, maxKeys)
def list_all_my_buckets(self, headers=None, prefix='', marker='', maxKeys=''):
'''
List all buckets of user
        :type headers: dict
:param
Returns:
HTTP Response
'''
method = 'GET'
bucket = ''
object = ''
body = ''
params = {}
if prefix != '':
params['prefix'] = prefix
if marker != '':
params['marker'] = marker
if maxKeys != '':
params['max-keys'] = maxKeys
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket_acl(self, bucket):
'''
Get Access Control Level of bucket
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
headers = {}
body = ''
params = {}
params['acl'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket_location(self, bucket):
'''
Get Location of bucket
'''
method = 'GET'
object = ''
headers = {}
body = ''
params = {}
params['location'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None):
'''
List object that in bucket
'''
return self.list_bucket(bucket, prefix, marker, delimiter, maxkeys, headers)
def list_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None):
'''
List object that in bucket
:type bucket: string
:param
:type prefix: string
:param
:type marker: string
:param
:type delimiter: string
:param
:type maxkeys: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['prefix'] = prefix
params['marker'] = marker
params['delimiter'] = delimiter
params['max-keys'] = maxkeys
return self.http_request(method, bucket, object, headers, body, params)
def get_website(self, bucket, headers=None):
'''
Get bucket website
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_lifecycle(self, bucket, headers=None):
'''
Get bucket lifecycle
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_logging(self, bucket, headers=None):
'''
Get bucket logging
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['logging'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_cors(self, bucket, headers=None):
'''
Get bucket cors
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def create_bucket(self, bucket, acl='', headers=None):
'''
Create bucket
'''
return self.put_bucket(bucket, acl, headers)
def put_bucket(self, bucket, acl='', headers=None):
'''
Create bucket
:type bucket: string
:param
:type acl: string
:param: one of private public-read public-read-write
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if acl != '':
if "AWS" == self.provider:
headers['x-amz-acl'] = acl
else:
headers['x-oss-acl'] = acl
method = 'PUT'
object = ''
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def put_logging(self, sourcebucket, targetbucket, prefix):
'''
Put bucket logging
:type sourcebucket: string
:param
:type targetbucket: string
:param: Specifies the bucket where you want Aliyun OSS to store server access logs
:type prefix: string
        :param: the key prefix under which the log files will be stored
Returns:
HTTP Response
'''
body = '<BucketLoggingStatus>'
if targetbucket:
body += '<LoggingEnabled>'
body += '<TargetBucket>%s</TargetBucket>' % targetbucket
if prefix:
body += '<TargetPrefix>%s</TargetPrefix>' % prefix
body += '</LoggingEnabled>'
body += '</BucketLoggingStatus>'
method = 'PUT'
object = ''
params = {}
headers = {}
params['logging'] = ''
return self.http_request(method, sourcebucket, object, headers, body, params)
def put_website(self, bucket, indexfile, errorfile):
'''
Put bucket website
:type bucket: string
:param
:type indexfile: string
        :param: the object that contains the index page
        :type errorfile: string
        :param: the object that contains the error page
Returns:
HTTP Response
'''
indexfile = convert_utf8(indexfile)
errorfile = convert_utf8(errorfile)
body = '<WebsiteConfiguration><IndexDocument><Suffix>%s</Suffix></IndexDocument><ErrorDocument><Key>%s</Key></ErrorDocument></WebsiteConfiguration>' % (indexfile, errorfile)
method = 'PUT'
object = ''
headers = {}
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_lifecycle(self, bucket, lifecycle):
'''
Put bucket lifecycle
:type bucket: string
:param
:type lifecycle: string
:param: lifecycle configuration
Returns:
HTTP Response
'''
body = lifecycle
method = 'PUT'
object = ''
headers = {}
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_cors(self, bucket, cors_xml, headers=None):
'''
Put bucket cors
:type bucket: string
:param
:type cors_xml: string
:param: the xml that contain cors rules
Returns:
HTTP Response
'''
body = cors_xml
method = 'PUT'
object = ''
if not headers:
headers = {}
headers['Content-Length'] = str(len(body))
base64md5 = base64.encodestring(md5.new(body).digest()).strip()
headers['Content-MD5'] = base64md5
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
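    # Illustrative usage sketch (the XML schema and names below are assumptions, not
    # taken from this file): put_cors expects an S3-style CORSConfiguration document,
    # roughly:
    #     cors_xml = ('<CORSConfiguration><CORSRule>'
    #                 '<AllowedOrigin>*</AllowedOrigin>'
    #                 '<AllowedMethod>GET</AllowedMethod>'
    #                 '</CORSRule></CORSConfiguration>')
    #     res = api.put_cors('my-bucket', cors_xml)   # `api` is an OssAPI instance
    # As with the other bucket calls here, check res.status on the returned response.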
def put_bucket_with_location(self, bucket, acl='', location='', headers=None):
'''
Create bucket
:type bucket: string
:param
:type acl: string
:param: one of private public-read public-read-write
:type location: string
:param:
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if acl != '':
if "AWS" == self.provider:
headers['x-amz-acl'] = acl
else:
headers['x-oss-acl'] = acl
params = {}
body = ''
if location != '':
body = r'<CreateBucketConfiguration>'
body += r'<LocationConstraint>'
body += location
body += r'</LocationConstraint>'
body += r'</CreateBucketConfiguration>'
method = 'PUT'
object = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_bucket(self, bucket, headers=None):
'''
Delete bucket
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def delete_website(self, bucket, headers=None):
'''
Delete bucket website
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_lifecycle(self, bucket, headers=None):
'''
Delete bucket lifecycle
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_logging(self, bucket, headers=None):
'''
Delete bucket logging
:type bucket: string
:param:
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['logging'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_cors(self, bucket, headers=None):
'''
Delete bucket cors
:type bucket: string
:param:
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_object_with_data(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Put object into bucket, the content of object is from input_content
'''
return self.put_object_from_string(bucket, object, input_content, content_type, headers, params)
def put_object_from_string(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Put object into bucket, the content of object is from input_content
:type bucket: string
:param
:type object: string
:param
:type input_content: string
:param
:type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = "PUT"
return self._put_or_post_object_from_string(method, bucket, object, input_content, content_type, headers, params)
def post_object_from_string(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Post object into bucket, the content of object is from input_content
:type bucket: string
:param
:type object: string
:param
:type input_content: string
:param
:type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = "POST"
return self._put_or_post_object_from_string(method, bucket, object, input_content, content_type, headers, params)
def _put_or_post_object_from_string(self, method, bucket, object, input_content, content_type, headers, params):
if not headers:
headers = {}
if not content_type:
content_type = get_content_type_by_filename(object)
if not headers.has_key('Content-Type') and not headers.has_key('content-type'):
headers['Content-Type'] = content_type
headers['Content-Length'] = str(len(input_content))
fp = StringIO.StringIO(input_content)
if "POST" == method:
res = self.post_object_from_fp(bucket, object, fp, content_type, headers, params)
else:
res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
def _open_conn_to_put_object(self, method, bucket, object, filesize, content_type=DefaultContentType, headers=None, params=None):
'''
NOT public API
        Open a connection to put an object
:type bucket: string
:param
:type filesize: int
:param
:type object: string
:param
:type input_content: string
:param
:type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
Initialized HTTPConnection
'''
if not params:
params = {}
if not headers:
headers = {}
object = convert_utf8(object)
resource = "/%s/" % bucket
if not bucket:
resource = "/"
resource = convert_utf8(resource)
resource = "%s%s%s" % (resource, object, get_resource(params))
object = oss_quote(object)
url = "/%s" % object
if bucket:
headers['Host'] = "%s.%s" % (bucket, self.host)
if not is_oss_host(self.host):
headers['Host'] = self.host
else:
headers['Host'] = self.host
if is_ip(self.host):
url = "/%s/%s" % (bucket, object)
headers['Host'] = self.host
url = append_param(url, params)
date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
if check_bucket_valid(bucket) and not is_ip(self.host):
conn = self.get_connection(headers['Host'])
else:
conn = self.get_connection()
conn.putrequest(method, url)
content_type = convert_utf8(content_type)
if not headers.has_key('Content-Type') and not headers.has_key('content-type'):
headers['Content-Type'] = content_type
headers["Content-Length"] = filesize
headers["Date"] = date
headers["Expect"] = "100-Continue"
headers['User-Agent'] = self.agent
for k in headers.keys():
conn.putheader(str(k), str(headers[k]))
if '' != self.secret_access_key and '' != self.access_id:
auth = self._create_sign_for_normal_auth(method, headers, resource)
conn.putheader("Authorization", auth)
conn.endheaders()
return conn
def put_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None):
'''
put object into bucket, the content of object is read from file
:type bucket: string
:param
:type object: string
:param
        :type filename: string
        :param: the name of the read file
        :type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
fp = open(filename, 'rb')
if not content_type:
content_type = get_content_type_by_filename(filename)
res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
def post_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None):
'''
post object into bucket, the content of object is read from file
:type bucket: string
:param
:type object: string
:param
        :type filename: string
        :param: the name of the read file
        :type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
fp = open(filename, 'rb')
if not content_type:
content_type = get_content_type_by_filename(filename)
res = self.post_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
def view_bar(self, num=1, sum=100):
rate = float(num) / float(sum)
rate_num = int(rate * 100)
print '\r%d%% ' % (rate_num),
sys.stdout.flush()
def put_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
'''
Put object into bucket, the content of object is read from file pointer
:type bucket: string
:param
:type object: string
:param
:type fp: file
:param: the pointer of the read file
:type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'PUT'
return self._put_or_post_object_from_fp(method, bucket, object, fp, content_type, headers, params)
def post_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
'''
Post object into bucket, the content of object is read from file pointer
:type bucket: string
:param
:type object: string
:param
:type fp: file
:param: the pointer of the read file
:type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'POST'
return self._put_or_post_object_from_fp(method, bucket, object, fp, content_type, headers, params)
def _put_or_post_object_from_fp(self, method, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
tmp_object = object
tmp_headers = {}
tmp_params = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
if params and isinstance(params, dict):
tmp_params = params.copy()
        # determine the file size, then rewind to the beginning
        fp.seek(0, os.SEEK_END)
        filesize = fp.tell()
        fp.seek(0, os.SEEK_SET)
conn = self._open_conn_to_put_object(method, bucket, object, filesize, content_type, headers, params)
totallen = 0
l = fp.read(self.SendBufferSize)
retry_times = 0
while len(l) > 0:
if retry_times > 100:
print "reach max retry times;%s" % retry_times
raise
try:
conn.send(l)
retry_times = 0
except:
retry_times += 1
continue
totallen += len(l)
if self.show_bar:
self.view_bar(totallen, filesize)
l = fp.read(self.SendBufferSize)
res = conn.getresponse()
if check_redirect(res):
self.host = helper_get_host_from_resp(res, bucket)
return self.put_object_from_fp(bucket, tmp_object, fp, content_type, tmp_headers, tmp_params)
return res
def get_object(self, bucket, object, headers=None, params=None):
'''
Get object
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_object_to_file(self, bucket, object, filename, headers=None):
'''
Get object and write the content of object into a file
:type bucket: string
:param
:type object: string
:param
:type filename: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
res = self.get_object(bucket, object, headers)
totalread = 0
if res.status / 100 == 2:
header = {}
header = convert_header2map(res.getheaders())
filesize = safe_get_element("content-length", header)
f = file(filename, 'wb')
data = ''
while True:
data = res.read(self.RecvBufferSize)
if data:
f.write(data)
totalread += len(data)
if self.show_bar:
self.view_bar(totalread, filesize)
else:
break
f.close()
# TODO: get object with flow
return res
def delete_object(self, bucket, object, headers=None):
'''
Delete object
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'DELETE'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def head_object(self, bucket, object, headers=None):
'''
Head object, to get the meta message of object without the content
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'HEAD'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def create_link_from_list(self, bucket, object, object_list=None, headers=None, params=None):
object_link_msg_xml = create_object_link_msg_xml_by_name(object_list)
return self.create_link(bucket, object, object_link_msg_xml, headers, params)
def create_link(self, bucket, object, object_link_msg_xml, headers=None, params=None):
'''
Create object link, merge all objects in object_link_msg_xml into one object
:type bucket: string
:param
:type object: string
:param
:type object_link_msg_xml: string
:param: xml format string, like
<CreateObjectLink>
<Part>
<PartNumber>N</PartNumber>
<PartName>objectN</PartName>
</Part>
</CreateObjectLink>
:type headers: dict
:param: HTTP header
:type params: dict
:param: parameters
Returns:
HTTP Response
'''
method = 'PUT'
if not headers:
headers = {}
if not params:
params = {}
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
body = object_link_msg_xml
params['link'] = ''
headers['Content-Length'] = str(len(body))
return self.http_request(method, bucket, object, headers, body, params)
def get_link_index(self, bucket, object, headers=None, params=None):
'''
Get all objects linked
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
if not headers:
headers = {}
if not params:
params = {}
params['link'] = ''
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def post_object_group(self, bucket, object, object_group_msg_xml, headers=None, params=None):
'''
Post object group, merge all objects in object_group_msg_xml into one object
:type bucket: string
:param
:type object: string
:param
:type object_group_msg_xml: string
:param: xml format string, like
<CreateFileGroup>
<Part>
<PartNumber>N</PartNumber>
<FileName>objectN</FileName>
<Etag>"47BCE5C74F589F4867DBD57E9CA9F808"</Etag>
</Part>
</CreateFileGroup>
:type headers: dict
:param: HTTP header
:type params: dict
:param: parameters
Returns:
HTTP Response
'''
method = 'POST'
if not headers:
headers = {}
if not params:
params = {}
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
body = object_group_msg_xml
params['group'] = ''
headers['Content-Length'] = str(len(body))
return self.http_request(method, bucket, object, headers, body, params)
def get_object_group_index(self, bucket, object, headers=None):
'''
Get object group_index
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
headers["x-oss-file-group"] = ''
method = 'GET'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def upload_part_from_file_given_pos(self, bucket, object, filename, offset, partsize, upload_id, part_number, headers=None, params=None):
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
return self.put_object_from_file_given_pos(bucket, object, filename, offset, partsize, content_type, headers, params)
def put_object_from_file_given_pos(self, bucket, object, filename, offset, partsize, content_type='', headers=None, params=None):
'''
        Put object into bucket, the content of object is read from a given position of filename
:type bucket: string
:param
:type object: string
:param
        :type filename: string
        :param: the name of the read file
        :type offset: int
        :param: the given position of file
        :type partsize: int
        :param: the size of read content
        :type content_type: string
        :param: the object content type, as supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
tmp_object = object
tmp_headers = {}
tmp_params = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
if params and isinstance(params, dict):
tmp_params = params.copy()
fp = open(filename, 'rb')
if offset > os.path.getsize(filename):
            fp.seek(0, os.SEEK_END)
else:
fp.seek(offset)
if not content_type:
content_type = get_content_type_by_filename(filename)
method = 'PUT'
conn = self._open_conn_to_put_object(method, bucket, object, partsize, content_type, headers, params)
left_len = partsize
while 1:
if left_len <= 0:
break
elif left_len < self.SendBufferSize:
buffer_content = fp.read(left_len)
else:
buffer_content = fp.read(self.SendBufferSize)
if buffer_content:
retry_times = 0
while 1:
if retry_times > 100:
print "reach max retry times;%s" % retry_times
fp.close()
raise
try:
conn.send(buffer_content)
retry_times = 0
break
except:
retry_times += 1
continue
left_len = left_len - len(buffer_content)
fp.close()
res = conn.getresponse()
if check_redirect(res):
self.host = helper_get_host_from_resp(res, bucket)
return self.put_object_from_file_given_pos(bucket, tmp_object, filename, offset, partsize
, content_type, tmp_headers, tmp_params)
return res
def upload_large_file(self, bucket, object, filename, thread_num=10, max_part_num=1000, headers=None):
'''
Upload large file, the content is read from filename.
        The large file is split into many parts; the parts are uploaded to the bucket
        and then merged into one object.
:type bucket: string
:param
:type object: string
:param
        :type filename: string
:param: the name of the read file
:type thread_num: int
:param
:type max_part_num: int
:param
:type headers: dict
:param
Returns:
HTTP Response
'''
#split the large file into 1000 parts or many parts
#get part_msg_list
if not headers:
headers = {}
filename = convert_utf8(filename)
part_msg_list = split_large_file(filename, object, max_part_num)
#make sure all the parts are put into same bucket
if len(part_msg_list) < thread_num and len(part_msg_list) != 0:
thread_num = len(part_msg_list)
step = len(part_msg_list) / thread_num
retry_times = self.retry_times
while(retry_times >= 0):
try:
threadpool = []
for i in xrange(0, thread_num):
if i == thread_num - 1:
end = len(part_msg_list)
else:
end = i * step + step
begin = i * step
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
current = PutObjectGroupWorker(oss, bucket, filename, part_msg_list[begin:end], retry_times)
threadpool.append(current)
current.start()
for item in threadpool:
item.join()
break
except:
retry_times = retry_times -1
if -1 >= retry_times:
print "after retry %s, failed, upload large file failed!" % retry_times
return
#get xml string that contains msg of object group
object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
content_type = get_content_type_by_filename(filename)
content_type = convert_utf8(content_type)
if not headers.has_key('Content-Type'):
headers['Content-Type'] = content_type
return self.post_object_group(bucket, object, object_group_msg_xml, headers)
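    # Illustrative usage sketch (bucket and file names are placeholders): the file is
    # split by split_large_file(), each slice is uploaded by a PutObjectGroupWorker
    # thread, and post_object_group() merges the parts server-side.
    #     api = OssAPI('oss.aliyuncs.com', '<access-id>', '<secret-key>')
    #     res = api.upload_large_file('my-bucket', 'backups/big.tar.gz',
    #                                 '/tmp/big.tar.gz', thread_num=10)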
def upload_large_file_by_link(self, bucket, object, filename, thread_num=5, max_part_num=50, headers=None):
'''
        Upload large file, the content is read from filename. The large file is split into many parts;
        all the parts are put into the bucket and then merged into one object.
:type bucket: string
:param
:type object: string
:param
        :type filename: string
:param: the name of the read file
:type thread_num: int
:param
:type max_part_num: int
:param
:type headers: dict
:param
Returns:
HTTP Response
'''
#split the large file into 100 parts or many parts
#get part_msg_list
if not headers:
headers = {}
filename = convert_utf8(filename)
part_msg_list = split_large_file(filename, object, max_part_num)
#make sure all the parts are put into same bucket
if len(part_msg_list) < thread_num and len(part_msg_list) != 0:
thread_num = len(part_msg_list)
step = len(part_msg_list) / thread_num
retry_times = self.retry_times
while(retry_times >= 0):
try:
threadpool = []
for i in xrange(0, thread_num):
if i == thread_num - 1:
end = len(part_msg_list)
else:
end = i * step + step
begin = i * step
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
current = PutObjectLinkWorker(oss, bucket, filename, part_msg_list[begin:end], self.retry_times)
threadpool.append(current)
current.start()
for item in threadpool:
item.join()
break
except:
retry_times = retry_times -1
if -1 >= retry_times:
print "after retry %s, failed, upload large file failed!" % retry_times
return
#get xml string that contains msg of object link
object_link_msg_xml = create_object_link_msg_xml(part_msg_list)
content_type = get_content_type_by_filename(filename)
content_type = convert_utf8(content_type)
if not headers.has_key('Content-Type'):
headers['Content-Type'] = content_type
return self.create_link(bucket, object, object_link_msg_xml, headers)
def copy_object(self, source_bucket, source_object, target_bucket, target_object, headers=None):
'''
Copy object
:type source_bucket: string
:param
:type source_object: string
:param
:type target_bucket: string
:param
:type target_object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
source_object = convert_utf8(source_object)
source_object = oss_quote(source_object)
headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object)
method = 'PUT'
body = ''
params = {}
return self.http_request(method, target_bucket, target_object, headers, body, params)
def init_multi_upload(self, bucket, object, headers=None, params=None):
'''
Init multi upload
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
if not headers:
headers = {}
method = 'POST'
body = ''
params['uploads'] = ''
if isinstance(headers, dict) and not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
return self.http_request(method, bucket, object, headers, body, params)
def get_all_parts(self, bucket, object, upload_id, max_parts=None, part_number_marker=None):
'''
List all upload parts of given upload_id
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type max_parts: int
:param
:type part_number_marker: string
:param
Returns:
HTTP Response
'''
method = 'GET'
headers = {}
body = ''
params = {}
params['uploadId'] = upload_id
if max_parts:
params['max-parts'] = max_parts
if part_number_marker:
params['part-number-marker'] = part_number_marker
return self.http_request(method, bucket, object, headers, body, params)
def get_all_multipart_uploads(self, bucket, delimiter=None, max_uploads=None, key_marker=None, prefix=None, upload_id_marker=None, headers=None):
'''
List all upload_ids and their parts
:type bucket: string
:param
:type delimiter: string
:param
:type max_uploads: string
:param
:type key_marker: string
:param
:type prefix: string
:param
:type upload_id_marker: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['uploads'] = ''
if delimiter:
params['delimiter'] = delimiter
if max_uploads:
params['max-uploads'] = max_uploads
if key_marker:
params['key-marker'] = key_marker
if prefix:
params['prefix'] = prefix
if upload_id_marker:
params['upload-id-marker'] = upload_id_marker
return self.http_request(method, bucket, object, headers, body, params)
def upload_part(self, bucket, object, filename, upload_id, part_number, headers=None, params=None):
'''
Upload the content of filename as one part of given upload_id
:type bucket: string
:param
:type object: string
:param
:type filename: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
return self.put_object_from_file(bucket, object, filename, content_type, headers, params)
def upload_part_from_string(self, bucket, object, data, upload_id, part_number, headers=None, params=None):
'''
Upload the content of string as one part of given upload_id
:type bucket: string
:param
:type object: string
:param
:type data: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
fp = StringIO.StringIO(data)
return self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
def copy_object_as_part(self, source_bucket, source_object, target_bucket,
target_object, upload_id, part_number, headers=None, params=None):
'''
        Upload a part whose data is copied from the source object in the source bucket
:type source_bucket: string
:param
:type source_object: string
:param
:type target_bucket: string
:param
:type target_object: string
:param
:type data: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
source_object = convert_utf8(source_object)
source_object = oss_quote(source_object)
method = 'PUT'
params['partNumber'] = part_number
params['uploadId'] = upload_id
headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object)
body = ''
return self.http_request(method, target_bucket, target_object, headers, body, params)
def complete_upload(self, bucket, object, upload_id, part_msg_xml, headers=None, params=None):
'''
Finish multiupload and merge all the parts in part_msg_xml as a object.
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type part_msg_xml: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'POST'
body = part_msg_xml
headers['Content-Length'] = str(len(body))
params['uploadId'] = upload_id
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
return self.http_request(method, bucket, object, headers, body, params)
def cancel_upload(self, bucket, object, upload_id, headers=None, params=None):
'''
Cancel multiupload and delete all parts of given upload_id
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
method = 'DELETE'
upload_id = convert_utf8(upload_id)
params['uploadId'] = upload_id
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def multi_upload_file(self, bucket, object, filename, upload_id='', thread_num=10, max_part_num=10000, headers=None, params=None):
'''
        Upload large file, the content is read from filename. The large file is split into many parts; the parts are uploaded to the bucket and then merged into one object.
:type bucket: string
:param
:type object: string
:param
        :type filename: string
:param: the name of the read file
:type upload_id: string
:param
:type thread_num: int
:param
:type max_part_num: int
:param
:type headers: dict
:param
:type params: dict
:param
Returns:
HTTP Response
'''
tmp_headers = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
if not tmp_headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(filename)
tmp_headers['Content-Type'] = content_type
#get init upload_id
if not upload_id:
res = self.init_multi_upload(bucket, object, tmp_headers, params)
body = res.read()
if res.status == 200:
h = GetInitUploadIdXml(body)
upload_id = h.upload_id
else:
err = ErrorXml(body)
raise Exception("%s, %s" %(res.status, err.msg))
if not upload_id:
raise Exception("-1, Cannot get upload id.")
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
return multi_upload_file2(oss, bucket, object, filename, upload_id, thread_num, max_part_num, self.retry_times, headers, params)
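    # Illustrative usage sketch (names are placeholders): the multipart flow is
    # init_multi_upload -> upload_part(s) -> complete_upload, and multi_upload_file
    # drives all three steps for you.
    #     api = OssAPI('oss.aliyuncs.com', '<access-id>', '<secret-key>')
    #     res = api.multi_upload_file('my-bucket', 'videos/movie.mp4', '/tmp/movie.mp4')
    #     if res.status != 200:
    #         print res.read()   # the error XML explains what went wrong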
def delete_objects(self, bucket, object_list=None, headers=None, params=None):
'''
Batch delete objects
:type bucket: string
:param:
:type object_list: list
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not object_list:
object_list = []
object_list_xml = create_delete_object_msg_xml(object_list)
return self.batch_delete_object(bucket, object_list_xml, headers, params)
def batch_delete_object(self, bucket, object_list_xml, headers=None, params=None):
'''
Delete the objects in object_list_xml
:type bucket: string
:param:
:type object_list_xml: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'POST'
object = ''
body = object_list_xml
headers['Content-Length'] = str(len(body))
params['delete'] = ''
base64md5 = base64.encodestring(md5.new(body).digest()).strip()
headers['Content-MD5'] = base64md5
return self.http_request(method, bucket, object, headers, body, params)
def list_objects(self, bucket, prefix=''):
'''
:type bucket: string
:param:
:type prefix: string
:param:
Returns:
a list that contains the objects in bucket with prefix
'''
get_instance = GetAllObjects()
marker_input = ''
object_list = []
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
(object_list, marker_output) = get_instance.get_object_in_bucket(oss, bucket, marker_input, prefix)
return object_list
def list_objects_dirs(self, bucket, prefix='', delimiter=''):
'''
:type bucket: string
:param:
:type prefix: string
:param:
:type prefix: delimiter
:param:
Returns:
a list that contains the objects in bucket with prefix
'''
get_instance = GetAllObjects()
marker_input = ''
object_list = []
dir_list = []
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
(object_list, dir_list) = get_instance.get_all_object_dir_in_bucket(oss, bucket, marker_input, prefix, delimiter)
return (object_list, dir_list)
def batch_delete_objects(self, bucket, object_list=None):
'''
:type bucket: string
:param:
:type object_list: object name list
:param:
Returns:
True or False
'''
if not object_list:
object_list = []
object_list_xml = create_delete_object_msg_xml(object_list)
try:
res = self.batch_delete_object(bucket, object_list_xml)
if res.status / 100 == 2:
return True
except:
pass
return False
def get_object_info(self, bucket, object, headers=None, params=None):
'''
Get object information
:type bucket: string
:param:
:type object: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'GET'
body = ''
params['objectInfo'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def options(self, bucket, object='', headers=None, params=None):
'''
Options object to determine if user can send the actual HTTP request
:type bucket: string
:param:
:type object: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'OPTIONS'
body = ''
return self.http_request(method, bucket, object, headers, body, params)
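# Illustrative end-to-end sketch (endpoint, credentials and names below are placeholders,
# not taken from this file). Every call returns the raw httplib response and callers are
# expected to check res.status themselves:
#
#     api = OssAPI('oss.aliyuncs.com', '<access-id>', '<secret-key>')
#     api.put_bucket('my-bucket', acl='private')
#     api.put_object_from_string('my-bucket', 'hello.txt', 'hello oss')
#     res = api.get_object('my-bucket', 'hello.txt')
#     body = res.read()                      # 'hello oss' when res.status == 200
#     api.delete_object('my-bucket', 'hello.txt')
#     api.delete_bucket('my-bucket')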
|
{
"content_hash": "7b9e733ff792952a0e5ec70d56f13ce9",
"timestamp": "",
"source": "github",
"line_count": 2028,
"max_line_length": 197,
"avg_line_length": 29.381163708086785,
"alnum_prop": 0.5390450616765965,
"repo_name": "luzhijun/Optimization",
"id": "2a937cccdd325cdcbe097d457e3b4fce883dbb83",
"size": "60735",
"binary": false,
"copies": "3",
"ref": "refs/heads/gh-pages",
"path": "cma-es/batchcompute_python_sdk/vp/oss_python_sdk/oss_api.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11592"
},
{
"name": "C++",
"bytes": "41273"
},
{
"name": "CSS",
"bytes": "8912"
},
{
"name": "HTML",
"bytes": "845295"
},
{
"name": "JavaScript",
"bytes": "185036"
},
{
"name": "Jupyter Notebook",
"bytes": "1680887"
},
{
"name": "Makefile",
"bytes": "166"
},
{
"name": "Matlab",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "1912745"
},
{
"name": "Shell",
"bytes": "333"
}
],
"symlink_target": ""
}
|
class Graph:
def __init__(self, graph_dict=None):
"""initializes a graph object"""
if graph_dict is None:
graph_dict = {}
self.__graph_dist = graph_dict
def __str__(self):
return "Undirected Graph \nNodes: %s \nEdges: %s" % (
self.list_vertices(), self.list_edges())
def graph(self):
return self.__graph_dist
def list_vertices(self):
return list(self.__graph_dist.keys())
def list_edges(self):
return self.__generate_edges()
def __generate_edges(self):
graph_edges = []
for vertex in self.__graph_dist:
for neighbour in self.__graph_dist[vertex]:
if {vertex, neighbour} not in graph_edges:
graph_edges.append({vertex, neighbour})
return graph_edges
    def add_edge(self, edge):
        u, v = set(edge)
        if u not in self.__graph_dist:
            # start a new adjacency list for an unseen vertex
            self.__graph_dist[u] = [v]
        else:
            self.__graph_dist[u].append(v)
        if v not in self.__graph_dist:
            self.__graph_dist[v] = [u]
        else:
            self.__graph_dist[v].append(u)
def add_vertex(self, vertex):
if vertex not in self.__graph_dist:
self.__graph_dist[vertex] = []
def has_edge(self, edge):
u, v = set(edge)
return (v in self.__graph_dist.get(u, []))
def delete_edge(self, edge):
u, v = set(edge)
if self.has_edge(edge):
self.__graph_dist[v].remove(u)
self.__graph_dist[u].remove(v)
    def delete_vertex(self, vertex):
        if vertex in self.__graph_dist:
            # iterate over a copy: delete_edge mutates the adjacency list being walked
            for neighbour in list(self.__graph_dist[vertex]):
                self.delete_edge((vertex, neighbour))
            del self.__graph_dist[vertex]
def find_path(self, start_vertex, end_vertex, path=None):
if path is None:
path = []
path.append(start_vertex)
if start_vertex == end_vertex:
return path
if start_vertex not in self.__graph_dist:
return None
for vertex in self.__graph_dist[start_vertex]:
if vertex not in path:
extended_path = self.find_path(vertex, end_vertex, path)
if extended_path:
return extended_path
return None
if __name__ == "__main__":
g = {
"a": ["d"],
"b": ["c"],
"c": ["b", "c", "d", "e"],
"d": ["a", "c"],
"e": ["c"]
}
graph = Graph(g)
    print(graph)
print("Vertices of graph:")
print(graph.list_vertices())
print("\nEdges of graph:")
print(graph.list_edges())
print("\nAdding a vertice")
graph.add_vertex("g")
print (graph.list_vertices())
graph.add_edge(("g", "a"))
graph.add_edge(("a", "c"))
graph.add_edge(("f", "c"))
print("\nEdges of graph:")
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
print(graph.has_edge(("a", "c")))
print(graph.graph())
print("\nDeleting edge (a, c):")
graph.delete_edge(("a", "c"))
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
print("\nDeleting vertex a:")
graph.delete_vertex("a")
print (graph.list_vertices())
print(graph.list_edges())
print(graph.graph())
print("\nPath between b - e")
print(graph.find_path("b", "e"))
|
{
"content_hash": "3f52e1d07946da85169af28299e6b3ec",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 72,
"avg_line_length": 27.629032258064516,
"alnum_prop": 0.5224751897256276,
"repo_name": "codervikash/algorithms",
"id": "8eb07347336360e0e897a02fabb64477591c429b",
"size": "3426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Graphs/graph.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3500"
},
{
"name": "JavaScript",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "55356"
}
],
"symlink_target": ""
}
|
import os
import sys
#Adding directory to the path where Python searches for modules
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
import mtrand
if __name__ == "__main__":
mt= []
mtrand.init_by_array(19650218)
n= 1
for i in range(0, n):
randomnum= mtrand.genrand_int32()
mt.append(randomnum)
mtrand.untemper(mt)
|
{
"content_hash": "326d169abf578624f96c5992c63a31cb",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 89,
"avg_line_length": 26.5625,
"alnum_prop": 0.6729411764705883,
"repo_name": "arvinddoraiswamy/blahblah",
"id": "ac57de74ff17ed295defc5553a33c1e7c99f1a3e",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cryptopals/Set3/c23.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4477"
},
{
"name": "Python",
"bytes": "149921"
},
{
"name": "Ruby",
"bytes": "2455"
},
{
"name": "Shell",
"bytes": "20492"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import sys
import pip
from cloudify.utils import LocalCommandRunner
from cloudify import utils
def extract_plugin_name(plugin_url):
previous_cwd = os.getcwd()
fetch_plugin_from_pip_by_url = not os.path.isdir(plugin_url)
plugin_dir = plugin_url
try:
if fetch_plugin_from_pip_by_url:
plugin_dir = tempfile.mkdtemp()
req_set = pip.req.RequirementSet(build_dir=None,
src_dir=None,
download_dir=None)
req_set.unpack_url(link=pip.index.Link(plugin_url),
location=plugin_dir,
download_dir=None,
only_download=False)
os.chdir(plugin_dir)
return LocalCommandRunner(
host=utils.get_local_ip()
).run('cmd.exe /c "{0} {1} {2}"'.format(
sys.executable,
os.path.join(os.path.dirname(__file__), 'extract_package_name.py'),
plugin_dir)).std_out
finally:
os.chdir(previous_cwd)
if fetch_plugin_from_pip_by_url:
shutil.rmtree(plugin_dir)
def extract_module_paths(module_name):
module_paths = []
files = LocalCommandRunner(host=utils.get_local_ip())\
.run('cmd /c "{0}\Scripts\pip.exe show -f {1}"'
.format(sys.prefix, module_name)).std_out.splitlines()
for module in files:
if module.endswith(".py") and "__init__" not in module:
if module.endswith("-script.py"):
script_stripped = module[:-len("-script.py")]
potential_exe_file = "{0}.exe".format(script_stripped)
if potential_exe_file in files:
# file is a console script "entry_point"
continue
            # the file paths are relative to the package __init__.py file.
module_paths.append(
module.replace("..\\", "").replace("\\", ".")
.replace(".py", "")
.strip())
return ','.join(module_paths)
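# Illustrative usage sketch (the package name is a placeholder): extract_module_paths
# shells out to "pip show -f <package>", keeps the listed .py files (skipping __init__
# and console-script wrappers), and returns them as dotted module paths:
#     paths = extract_module_paths('some_plugin')
#     # -> 'some_plugin.tasks,some_plugin.utils'  (comma-separated string)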
|
{
"content_hash": "5ca4ddb1cd55dfe5b01e4e944567bb2e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 36.741379310344826,
"alnum_prop": 0.5330830595964337,
"repo_name": "konradxyz/cloudify-manager",
"id": "fa9b7ec9a51492a110a969428effbd46ee3d84dc",
"size": "2942",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plugins/windows-plugin-installer/windows_plugin_installer/plugin_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "3344"
},
{
"name": "Python",
"bytes": "822901"
},
{
"name": "Shell",
"bytes": "16706"
}
],
"symlink_target": ""
}
|
from activitystreams.models.activity import Activity
from dino import environ
from dino.config import SessionKeys
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class BaseValidator(object):
def validate_request(self, activity: Activity) -> (bool, str):
if not hasattr(activity.actor, 'id') or activity.actor.id is None:
return False, 'no ID on actor'
session_user_id = environ.env.session.get('user_id', 'NOT_FOUND_IN_SESSION')
if str(activity.actor.id).strip() != str(session_user_id).strip():
error_msg = "user_id in session '%s' doesn't match user_id in request '%s'"
return False, error_msg % (session_user_id, activity.actor.id)
return True, None
    def validate_login(self, user_id: str, token: str) -> (bool, str, dict):
        """
        checks whether the required data was received and that it validates with the community (not tampered with)
        :param user_id: the id of the user
        :param token: the token of the user to verify
        :return: tuple(Boolean, String, dict): (is_valid, error_message, session)
        """
is_valid, error_msg, session = environ.env.auth.authenticate_and_populate_session(user_id, token)
if not is_valid:
return False, error_msg, None
is_valid, error_msg = self.validate_session(session)
return is_valid, error_msg, session
def validate_session(self, session: dict) -> (bool, str):
"""
        validate that all required parameters were sent from the client side
:param session: the session dict to validate
:return: tuple(Boolean, String): (is_valid, error_message)
"""
for session_key in SessionKeys:
key = session_key.value
if key not in SessionKeys.requires_session_keys.value:
continue
if key not in session:
return False, '"%s" is a required parameter' % key
val = session[key]
if val is None or val == '':
return False, '"%s" is a required parameter' % key
return True, None
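# Illustrative usage sketch (ids and tokens are placeholders): a socket handler would
# typically authenticate first and only then validate individual requests.
#
#     validator = BaseValidator()
#     is_valid, error, session = validator.validate_login('1234', 'token-from-client')
#     if not is_valid:
#         pass  # reject the connection, reporting `error`
#
# validate_session() only checks the keys listed in SessionKeys.requires_session_keys,
# so optional session keys may be absent without failing validation.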
|
{
"content_hash": "88641ea5dce4d0d869d527962158e32c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 106,
"avg_line_length": 38.30909090909091,
"alnum_prop": 0.6193640246796392,
"repo_name": "thenetcircle/dino",
"id": "d56d0d1fd554f9fae4a35498b9c0d70a871013df",
"size": "2671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dino/validation/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2042"
},
{
"name": "Dockerfile",
"bytes": "1626"
},
{
"name": "HTML",
"bytes": "48902"
},
{
"name": "JavaScript",
"bytes": "59824"
},
{
"name": "Python",
"bytes": "1420576"
},
{
"name": "Shell",
"bytes": "15192"
}
],
"symlink_target": ""
}
|
"""
raven.utils.serializer.manager
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
__all__ = ('register', 'transform')
logger = logging.getLogger('sentry.errors.serializer')
class SerializationManager(object):
logger = logger
def __init__(self):
self.__registry = []
self.__serializers = {}
@property
def serializers(self):
# XXX: Would serializers ever need state such that we shouldn't cache them?
for serializer in self.__registry:
yield serializer
def register(self, serializer):
if serializer not in self.__registry:
self.__registry.append(serializer)
return serializer
class Serializer(object):
logger = logger
def __init__(self, manager):
self.manager = manager
self.context = set()
self.serializers = []
for serializer in manager.serializers:
self.serializers.append(serializer(self))
def close(self):
del self.serializers
del self.context
def transform(self, value, **kwargs):
"""
Primary function which handles recursively transforming
values via their serializers
"""
if value is None:
return None
objid = id(value)
if objid in self.context:
return '<...>'
self.context.add(objid)
try:
for serializer in self.serializers:
if serializer.can(value):
try:
return serializer.serialize(value, **kwargs)
except Exception, e:
logger.exception(e)
return unicode(type(value))
# if all else fails, lets use the repr of the object
try:
return self.transform(repr(value), **kwargs)
except Exception, e:
logger.exception(e)
# It's a common case that a model's __unicode__ definition may try to query the database,
# which, if it was not cleaned up correctly, would hit a transaction-aborted exception
return unicode(type(value))
finally:
self.context.remove(objid)
manager = SerializationManager()
register = manager.register
def transform(value, manager=manager, **kwargs):
serializer = Serializer(manager)
try:
return serializer.transform(value, **kwargs)
finally:
serializer.close()
|
{
"content_hash": "9449b7818d32cd632cbb085f8389862c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 102,
"avg_line_length": 28.406593406593405,
"alnum_prop": 0.581431334622824,
"repo_name": "collective/mr.poe",
"id": "33ae3959dbc2278d59b0ad279ec8eb3138cffc9c",
"size": "2585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raven/utils/serializer/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "285308"
}
],
"symlink_target": ""
}
|
import tempfile
import boto3
import pydash
# noinspection PyUnresolvedReferences
from infra_buddy.commandline import cli
from infra_buddy.commands.bootstrap import command as bcommand
from testcase_parent import ParentTestCase
import unittest
class BootStrapTestCase(ParentTestCase):
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
super(BootStrapTestCase, cls).setUpClass()
def test_boostrap(self):
environments = ['ci', 'prod']
gen_keys = ["{env}-{app}".format(env=env,app=self.test_deploy_ctx.application) for env in environments]
tempdir = tempfile.mkdtemp()
bcommand.do_command(deploy_ctx=self.test_deploy_ctx, environments=environments,destination=tempdir)
client = boto3.client('ec2', region_name=self.test_deploy_ctx.region)
try:
res = client.describe_key_pairs()
known = pydash.pluck(res['KeyPairs'], 'KeyName')
for key in gen_keys:
self.assertTrue(key in known,"Did not generate key - {}".format(key))
finally:
for gen_key in gen_keys:
client.delete_key_pair(KeyName=gen_key)
self.clean_dir(tempdir)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "fe9a1ac08305d70b3709d6dad717f2f6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 111,
"avg_line_length": 33.026315789473685,
"alnum_prop": 0.6581673306772908,
"repo_name": "AlienVault-Engineering/infra-buddy",
"id": "c66fa1e23ba1cf3db04df59c56f7f4e02fa06d22",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/unittest/python/bootstrap_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "179549"
}
],
"symlink_target": ""
}
|
"""
airPy is a flight controller based on pyboard and written in micropython.
The MIT License (MIT)
Copyright (c) 2016 Fabrizio Scimia, fabrizio.scimia@gmail.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pyb import UART
import array
# import util.airpy_logger as logger
class SBUSReceiver:
def __init__(self):
self.sbus = UART(3, 100000)
self.sbus.init(100000, bits=8, parity=0, stop=2, timeout_char=3, read_buf_len=250)
# constants
self.START_BYTE = b'0f'
self.END_BYTE = b'00'
self.SBUS_FRAME_LEN = 25
self.SBUS_NUM_CHAN = 18
self.OUT_OF_SYNC_THD = 10
self.SBUS_NUM_CHANNELS = 18
self.SBUS_SIGNAL_OK = 0
self.SBUS_SIGNAL_LOST = 1
self.SBUS_SIGNAL_FAILSAFE = 2
# Stack Variables initialization
self.validSbusFrame = 0
self.lostSbusFrame = 0
self.frameIndex = 0
self.resyncEvent = 0
self.outOfSyncCounter = 0
self.sbusBuff = bytearray(1) # single byte used for sync
self.sbusFrame = bytearray(25) # single SBUS Frame
self.sbusChannels = array.array('H', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # RC Channels
self.isSync = False
self.startByteFound = False
self.failSafeStatus = self.SBUS_SIGNAL_FAILSAFE
# logger.info("SBUS Stack Started")
def get_rx_channels(self):
return self.sbusChannels
def get_rx_channel(self, num_ch):
return self.sbusChannels[num_ch]
def get_failsafe_status(self):
return self.failSafeStatus
def get_rx_report(self):
rep = {}
rep['Valid Frames'] = self.validSbusFrame
rep['Lost Frames'] = self.lostSbusFrame
rep['Resync Events'] = self.resyncEvent
return rep
def decode_frame(self):
# TODO: DoubleCheck if it has to be removed
for i in range(0, self.SBUS_NUM_CHANNELS - 2):
self.sbusChannels[i] = 0
# counters initialization
byte_in_sbus = 1
bit_in_sbus = 0
ch = 0
bit_in_channel = 0
for i in range(0, 175): # TODO Generalization
if self.sbusFrame[byte_in_sbus] & (1 << bit_in_sbus):
self.sbusChannels[ch] |= (1 << bit_in_channel)
bit_in_sbus += 1
bit_in_channel += 1
if bit_in_sbus == 8:
bit_in_sbus = 0
byte_in_sbus += 1
if bit_in_channel == 11:
bit_in_channel = 0
ch += 1
# Decode Digitals Channels
# Digital Channel 1
if self.sbusFrame[self.SBUS_FRAME_LEN - 2] & (1 << 0):
self.sbusChannels[self.SBUS_NUM_CHAN - 2] = 1
else:
self.sbusChannels[self.SBUS_NUM_CHAN - 2] = 0
# Digital Channel 2
if self.sbusFrame[self.SBUS_FRAME_LEN - 2] & (1 << 1):
self.sbusChannels[self.SBUS_NUM_CHAN - 1] = 1
else:
self.sbusChannels[self.SBUS_NUM_CHAN - 1] = 0
# Failsafe
self.failSafeStatus = self.SBUS_SIGNAL_OK
if self.sbusFrame[self.SBUS_FRAME_LEN - 2] & (1 << 2):
self.failSafeStatus = self.SBUS_SIGNAL_LOST
if self.sbusFrame[self.SBUS_FRAME_LEN - 2] & (1 << 3):
self.failSafeStatus = self.SBUS_SIGNAL_FAILSAFE
def get_sync(self):
if self.sbus.any() > 0:
if self.startByteFound:
if self.frameIndex == (self.SBUS_FRAME_LEN - 1):
self.sbus.readinto(self.sbusBuff, 1) # end of frame byte
if self.sbusBuff[0] == 0: # TODO: Change to use constant var value
self.startByteFound = False
self.isSync = True
self.frameIndex = 0
else:
self.sbus.readinto(self.sbusBuff, 1) # keep reading 1 byte until the end of frame
self.frameIndex += 1
else:
self.frameIndex = 0
self.sbus.readinto(self.sbusBuff, 1) # read 1 byte
if self.sbusBuff[0] == 15: # TODO: Change to use constant var value
self.startByteFound = True
self.frameIndex += 1
def get_new_data(self):
if self.isSync:
if self.sbus.any() >= self.SBUS_FRAME_LEN:
self.sbus.readinto(self.sbusFrame, self.SBUS_FRAME_LEN) # read the whole frame
if (self.sbusFrame[0] == 15 and self.sbusFrame[
self.SBUS_FRAME_LEN - 1] == 0): # TODO: Change to use constant var value
self.validSbusFrame += 1
self.outOfSyncCounter = 0
self.decode_frame()
else:
self.lostSbusFrame += 1
self.outOfSyncCounter += 1
if self.outOfSyncCounter > self.OUT_OF_SYNC_THD:
self.isSync = False
self.resyncEvent += 1
else:
self.get_sync()
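# Hedged usage sketch (an illustrative addition, not part of the original
# driver): the receiver is normally polled from the main flight loop. The
# helper name below is a placeholder and no timing guarantees are implied.
def _example_poll_once(receiver):
    receiver.get_new_data()
    if receiver.get_failsafe_status() == receiver.SBUS_SIGNAL_OK:
        # channels is an array.array('H') of 18 decoded channel values
        return receiver.get_rx_channels()
    return None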
|
{
"content_hash": "ddf1e6e7ef3520f3d7d0f781565ae6c7",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 115,
"avg_line_length": 36.896341463414636,
"alnum_prop": 0.584200958519253,
"repo_name": "Sokrates80/air-py",
"id": "690385ee83e5919fdcf59b4f7b58b2d8de81525b",
"size": "6051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "receiver/sbus_receiver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155916"
}
],
"symlink_target": ""
}
|
import inspect
import numpy as np
import pytest
from astropy import units as u
from astropy.nddata.decorators import support_nddata
from astropy.nddata.nddata import NDData
from astropy.utils.exceptions import AstropyUserWarning
from astropy.wcs import WCS
class CCDData(NDData):
pass
@support_nddata
def wrapped_function_1(data, wcs=None, unit=None):
return data, wcs, unit
def test_pass_numpy():
data_in = np.array([1, 2, 3])
data_out, wcs_out, unit_out = wrapped_function_1(data=data_in)
assert data_out is data_in
assert wcs_out is None
assert unit_out is None
def test_pass_all_separate():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
data_out, wcs_out, unit_out = wrapped_function_1(
data=data_in, wcs=wcs_in, unit=unit_in
)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_pass_nddata_and_explicit():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
unit_in_alt = u.mJy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in)
with pytest.warns(
AstropyUserWarning,
match=(
"Property unit has been passed explicitly and as "
"an NDData property, using explicitly specified value"
),
) as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in, unit=unit_in_alt)
assert len(w) == 1
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in_alt
def test_pass_nddata_ignored():
data_in = np.array([1, 2, 3])
wcs_in = WCS(naxis=1)
unit_in = u.Jy
nddata_in = NDData(data_in, wcs=wcs_in, unit=unit_in, mask=[0, 1, 0])
with pytest.warns(
AstropyUserWarning,
match=(
"The following attributes were set on the data "
"object, but will be ignored by the function: mask"
),
) as w:
data_out, wcs_out, unit_out = wrapped_function_1(nddata_in)
assert len(w) == 1
assert data_out is data_in
assert wcs_out is wcs_in
assert unit_out is unit_in
def test_incorrect_first_argument():
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_2(something, wcs=None, unit=None):
pass
assert (
exc.value.args[0]
== "Can only wrap functions whose first positional argument is `data`"
)
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_3(something, data, wcs=None, unit=None):
pass
assert (
exc.value.args[0]
== "Can only wrap functions whose first positional argument is `data`"
)
with pytest.raises(ValueError) as exc:
@support_nddata
def wrapped_function_4(wcs=None, unit=None):
pass
assert (
exc.value.args[0]
== "Can only wrap functions whose first positional argument is `data`"
)
def test_wrap_function_no_kwargs():
@support_nddata
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
assert wrapped_function_5(nddata_in, [1, 2, 3]) is data_in
def test_wrap_function_repack_valid():
@support_nddata(repack=True, returns=["data"])
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
nddata_out = wrapped_function_5(nddata_in, [1, 2, 3])
assert isinstance(nddata_out, NDData)
assert nddata_out.data is data_in
def test_wrap_function_accepts():
class MyData(NDData):
pass
@support_nddata(accepts=MyData)
def wrapped_function_5(data, other_data):
return data
data_in = np.array([1, 2, 3])
nddata_in = NDData(data_in)
mydata_in = MyData(data_in)
assert wrapped_function_5(mydata_in, [1, 2, 3]) is data_in
with pytest.raises(
TypeError,
match=(
"Only NDData sub-classes that inherit "
"from MyData can be used by this function"
),
):
wrapped_function_5(nddata_in, [1, 2, 3])
def test_wrap_preserve_signature_docstring():
@support_nddata
def wrapped_function_6(data, wcs=None, unit=None):
"""
An awesome function
"""
pass
if wrapped_function_6.__doc__ is not None:
assert wrapped_function_6.__doc__.strip() == "An awesome function"
signature = inspect.signature(wrapped_function_6)
assert str(signature) == "(data, wcs=None, unit=None)"
def test_setup_failures1():
# repack but no returns
with pytest.raises(ValueError):
support_nddata(repack=True)
def test_setup_failures2():
# returns but no repack
with pytest.raises(ValueError):
support_nddata(returns=["data"])
def test_setup_failures9():
# keeps but no repack
with pytest.raises(ValueError):
support_nddata(keeps=["unit"])
def test_setup_failures3():
# same attribute in keeps and returns
with pytest.raises(ValueError):
support_nddata(repack=True, keeps=["mask"], returns=["data", "mask"])
def test_setup_failures4():
# function accepts *args
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures10():
# function accepts **kwargs
with pytest.raises(ValueError):
@support_nddata
def test(data, **kwargs):
pass
def test_setup_failures5():
# function accepts *args (or **kwargs)
with pytest.raises(ValueError):
@support_nddata
def test(data, *args):
pass
def test_setup_failures6():
# First argument is not data
with pytest.raises(ValueError):
@support_nddata
def test(img):
pass
def test_setup_failures7():
# accepts CCDData but was given just an NDData
with pytest.raises(TypeError):
@support_nddata(accepts=CCDData)
def test(data):
pass
test(NDData(np.ones((3, 3))))
def test_setup_failures8():
# function returns a different number of arguments than specified. Using
# NDData here so we don't get into trouble when creating a CCDData without
# a unit!
with pytest.raises(ValueError):
@support_nddata(repack=True, returns=["data", "mask"])
def test(data):
return 10
test(NDData(np.ones((3, 3)))) # do NOT use CCDData here.
def test_setup_failures11():
# function accepts no arguments
with pytest.raises(ValueError):
@support_nddata
def test():
pass
def test_setup_numpyarray_default():
# It should be possible (even if it's not advisable to use mutable
# defaults) to have a numpy array as default value.
@support_nddata
def func(data, wcs=np.array([1, 2, 3])):
return wcs
def test_still_accepts_other_input():
@support_nddata(repack=True, returns=["data"])
def test(data):
return data
assert isinstance(test(NDData(np.ones((3, 3)))), NDData)
assert isinstance(test(10), int)
assert isinstance(test([1, 2, 3]), list)
def test_accepting_property_normal():
# Accepts a mask attribute and takes it from the input
@support_nddata
def test(data, mask=None):
return mask
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with pytest.warns(AstropyUserWarning) as w:
assert test(ndd, mask=10) == 10
assert len(w) == 1
def test_parameter_default_identical_to_explicit_passed_argument():
# If the default is identical to the explicitly passed argument this
# should still raise a Warning and use the explicit one.
@support_nddata
def func(data, meta={"a": 1}):
return meta
with pytest.warns(AstropyUserWarning) as w:
assert func(NDData(1, meta={"b": 2}), {"a": 1}) == {"a": 1}
assert len(w) == 1
assert func(NDData(1, meta={"b": 2})) == {"b": 2}
def test_accepting_property_notexist():
# Accepts flags attribute but NDData doesn't have one
@support_nddata
def test(data, flags=10):
return flags
ndd = NDData(np.ones((3, 3)))
test(ndd)
def test_accepting_property_translated():
# Accepts a masked argument, which is populated from the NDData mask attribute!
@support_nddata(mask="masked")
def test(data, masked=None):
return masked
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._mask = np.zeros((3, 3))
assert np.all(test(ndd) == 0)
# Use the explicitly given one (raises a Warning)
with pytest.warns(AstropyUserWarning) as w:
assert test(ndd, masked=10) == 10
assert len(w) == 1
def test_accepting_property_meta_empty():
# Meta is always set (OrderedDict) so it has a special case that it's
# ignored if it's empty but not None
@support_nddata
def test(data, meta=None):
return meta
ndd = NDData(np.ones((3, 3)))
assert test(ndd) is None
ndd._meta = {"a": 10}
assert test(ndd) == {"a": 10}
|
{
"content_hash": "38eedda105c34f88d5223cff3577a444",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 85,
"avg_line_length": 25.070866141732285,
"alnum_prop": 0.6234296482412061,
"repo_name": "astropy/astropy",
"id": "f392dbbb24a04ac8fdad37c6b446bea3853083bd",
"size": "9617",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "astropy/nddata/tests/test_decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11039709"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "79917"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12402561"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import re, clue,sys
def message(*m):
print(*m,file=sys.stderr)
# loads the clues "line" by "line" from iterable (could be a file,
# list of strings etc),
# returns (dictionary of metadata (key => data), dictionary of clues (name => Clue))
def load_clues(iterable):
clues = {}
metadata = {}
count = 0
for cl in iterable:
stripped = cl.strip()
if not stripped:
continue
start = stripped[0] # get first character
if start == '@': # metadata
try:
rawkey,rawdata = stripped[1:].split(':',1) # separate on :
key = rawkey.lower().strip()
data = rawdata.strip()
if not key: # no real key
continue
if len(key) > 1 and key[-1] == '+': # it's a multi-value option, so make a list
key = key[:-1]
if key in metadata:
mk = metadata[key]
if isinstance(mk,list):
mk.append(data)
else:
metadata[key] = [mk, data]
else:
metadata[key] = [data]
else:
metadata[key] = data.strip()
except Exception as e:
message("Warning: Tried but couldn't parse as metadata:", cl)
message(str(e))
elif start != '#':
# not a comment
# parse the clues, possibly multiple due to separated
# clues
try:
for c in clue.parse_clues(stripped):
id = c.name()
if not id:
id = count
count += 1
clues[id] = c
except:
# probably couldn't parse the clue...
message("Warning: Couldn't parse as a clue:", cl)
return (metadata,clues)
# load clues from a file
def from_file(filename):
return load_clues(open(filename,'rU'))
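# Hedged usage sketch (an illustrative addition, not part of the original
# script): load_clues accepts any iterable of lines, so an in-memory list works
# too. Only metadata and comment lines are shown here; the clue syntax itself
# is defined by clue.parse_clues and is not assumed.
def _example_load_metadata_only():
    metadata, clues = load_clues([
        '@title: Example crossword',
        '@author: Anonymous',
        '# comment lines are ignored',
    ])
    return metadata, clues  # ({'title': 'Example crossword', 'author': 'Anonymous'}, {})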
# turn a dictionary of clues into a representation of the grid
def make_grid(clues):
# work out the size of the grid, by going through all the clues
minx,maxx = (float('inf'),float('-inf'))
miny,maxy = minx,maxx
for name,c in clues.items():
for x,y in (c.endpoint(),c.startpoint()):
maxx = max(maxx,x)
minx = min(minx,x)
maxy = max(maxy,y)
miny = min(miny,y)
# we've got the size of the grid
xlen = maxx - minx + 1
ylen = maxy - miny + 1
# the grid is represented by a matrix of
# (letter, across_clue_that_starts_here, down_clue_that_starts_here)
# with None for blank cells, and None in any of the elements of the
# tuple to represent missing value
grid = [[None] * xlen for _ in range(ylen)]
# go through the clues again, filling in the grid with the letters
# die if there is an overlap with mismatched letters
for name,c in clues.items():
x,y = c.startpoint()
endx,endy = c.endpoint()
# normalise the clue to fit in the 0-based indexed grid
x -= minx
y -= miny
endx -= minx
endy -= miny
# get the answer, if the clue doesn't have one (i.e. it was
# defined by a length spec), then use None
answer = c.text_answer()
if not answer:
answer = [None] * c.length()
# get the stuff at our current letter
cur = grid[y][x]
if not cur: # it is a blank square
cur = (answer[0],None,None)
elif not cur[0]: # it doesn't have a letter
cur = (answer[0],cur[1],cur[2])
# check the first letter match
if answer[0] and cur[0] and answer[0] != cur[0][0]:
raise ValueError("Mismatched letters ('%s' vs '%s') at (%d, %d)" % (cur[0],answer[0],x+minx,y+miny))
# across clue
if c.is_across():
if cur[1]:
raise ValueError("Two clues starting at (%d,%d)" % (x+minx,y+miny))
grid[y][x] = (cur[0],c,cur[2]) # update the starting cell
# go through the rest of the answer, filling in as appropriate
for char,i in zip(answer[1:],range(x+1,endx + 1)):
curgrid = grid[y][i]
if not curgrid: # blank cell,
grid[y][i] = (char,None,None)
elif not curgrid[0]: # the letter was blank
grid[y][i] = (char,curgrid[1],curgrid[2])
elif char and curgrid[0] != char: # mismatch!!
raise ValueError("Mismatched letters ('%s' vs. '%s') at (%d, %d)" % (curgrid[0],char,i+minx,y+miny))
else: # down clue
if cur[2]:
raise ValueError("Two clues starting at (%d,%d)" % (x+minx,y+miny))
grid[y][x] = (cur[0],cur[1],c) # update the starting cell
# go through the rest of the answer, filling in as appropriate
for char,i in zip(answer[1:],range(y+1,endy + 1)):
curgrid = grid[i][x]
if not curgrid: # blank cell
grid[i][x] = (char,None,None)
elif not curgrid[0]: # the letter was blank
grid[i][x] = (char,curgrid[1],curgrid[2])
elif char and curgrid[0] != char: # mismatch!!
raise ValueError("Mismatched letters ('%s' vs. '%s') at (%d, %d)" % (curgrid[0],char,i+minx,y+miny))
# now go through the grid from left-to-right, top-to-bottom,
# numbering clues
count = 0
for row in grid:
for clue in row:
# check that the cell isn't blank and that a clue starts here
if clue and (clue[1] or clue[2]):
count += 1
if clue[1]:
clue[1].number(count)
if clue[2]:
clue[2].number(count)
# the numbers are known, so now go and resolve references
# (like "See 12-across")
for name,c in clues.items():
c.resolve_names(clues)
return grid
# massively hacky, but: take a grid and metadata and render the crossword
def render_as_latex(grid,metadata={},answers=False):
# matches stuff in the form "[foo]bar"
RE_OPTIONS = re.compile(r'^\[([^\]]*)\](.*)$')
ylen = len(grid)
xlen = len(grid[0])
break_page = 'break' in metadata and metadata['break'].lower() == "true"
landscape = metadata.get('orientation','portrait').lower() == 'landscape'
# parse the margin
margin = 'margin=1in'
if 'margin' in metadata:
# space in the middle, so it's a complicated declaration
m = metadata['margin'].strip()
if ' ' in m:
l = m.split()
if len(l) == 2: # <vertical> <horizontal>
margin = 'top={0},right={1},bottom={0},left={1}'.format(*l)
elif len(l) == 4: # <top> <right> <bottom> <left>
margin = 'top=%s,right=%s,bottom=%s,left=%s' % tuple(l)
else:
raise ValueError("Invalid margin declaration: %s" % metadata['margin'])
else: # just one number
margin = 'margin=%s' % m
# \documentclass stuff
docclass='article'
docclassoptions = 'a4paper,10pt'
if 'documentclass' in metadata:
# check if it's of the form "[options,...]class"
m = RE_OPTIONS.match(metadata['documentclass'])
if m: # yep there's new options
docclass = m.group(2).strip()
docclassoptions = m.group(1)
else: # nope
docclass = metadata['documentclass']
docclassoptions = ''
# the document is represented as a large list, which is "".join'd at the end
# setup, default/required packages
latex = [r'''\documentclass[%s]{%s}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage[%s,%s]{geometry}
\usepackage{tikz}
\usetikzlibrary{positioning}
\usepackage{multicol}
\usepackage{amsmath}''' % (docclassoptions,docclass,landscape and 'landscape' or 'portrait', margin)]
# load more packages
packagesl = metadata.get('package',[])
if not isinstance(packagesl, list): # make sure its a list
packagesl = [packagesl]
for p in packagesl:
m = RE_OPTIONS.match(p)
if m: # check if it has options
options = m.group(1)
name = m.group(2).strip()
else: # no options
options = ''
name = p
latex.append(r'\usepackage[%s]{%s}' % (options,name))
# no indent, and use sans serif, and no page numbers
latex.append(r'''\renewcommand{\familydefault}{\sfdefault}
\setlength\parindent{0pt}
\pagestyle{empty}
\begin{document}
\thispagestyle{empty}''')
# we have a title!
if 'title' in metadata:
latex.append(r'\centerline{\Large %s}\medskip'%metadata['title'])
# we have an author!
if 'author' in metadata:
latex.append(r'\centerline{%s}\medskip'%metadata['author'])
# in landscape the clues and crossword go next to each other
if landscape:
latex.append(r'\begin{multicols}{2}')
# the scale of the tikzpicture (default is .8)
scale = metadata.get('scale','0.8')
tikz = [r'\vspace*{\fill}'] # make it vertically centered (approximately)
tikz.append(r'''\begin{center}
\scalebox{%s}{
\begin{tikzpicture}[number/.style={below right},
answer/.style={color=gray,font=\scshape}]''' % (scale))
tikz.append(r'\draw[black] (0,%d) grid (%d,0);' % (-ylen,xlen)) # draw the grid
# might as well save the clues for later, for efficiency's sake
across = []
down = []
# go through the grid (left-to-right, top-to-bottom) drawing
# numbers or black squares as appropriate
for i,row in enumerate(grid):
for j,c in enumerate(row):
if c: # yep there is a letter
if answers and c[0]: # we need to print the letter (and it exists)
tikz.append(r'\node[answer] at (%.1f,%.1f) {%s};' % (j+0.5,-i-0.5,c[0]))
if c[1] or c[2]: # a clue starts here
if c[1]: # a wild across clue appears
num = c[1].number()
across.append(c[1])
if c[2]: # down too!
num = c[2].number()
down.append(c[2])
# draw the number
tikz.append(r'\node[number] at (%d,%d) {%d};' % (j,-i,num))
else:
# it's empty, so make it black
tikz.append(r'\fill[black] (%d,%d) rectangle (%d,%d);' % (j,-i,j+1,-i-1))
# finish up
tikz.append(r'''
\end{tikzpicture}}
\end{center}''')
# vertically centered
tikz.append(r'\vspace*{\fill}\vspace*{\fill}\vspace*{\fill}\vspace*{\fill}')
# crossword goes on the right in landscape, so it needs to be added later
if not landscape:
latex += tikz
# do we put the clues separately?
if break_page:
latex.append(r'\pagebreak\vspace*{\fill}') # (vertically center the clues)
# clues in 2 columns
latex.append(r'\begin{multicols}{2}')
latex.append(r'\subsection*{Across}')
# How to render the clues. Takes a number, the text of the clue, a
# length spec and a list of the "child" clues of this clue
# (i.e. separate parts of a separated clue)
def rrr(num, clu, lstring, children):
# extra things to put as referenced clues
extra = ''
if children: # there are children
_extra = []
for cccc in children:
_extra.append("%d-%s" % (cccc.number(), cccc.direction_name(True)))
extra = ', '+ ', '.join(_extra) # comma separated ", 1-down, 2-across"
if lstring is None: # no length string
return r'\textbf{%d%s} %s' % (num,extra, clu)
else:
return r'\textbf{%d%s} %s (%s)' % (num, extra, clu,lstring)
# add all the rendered across clues
for c in across:
latex.append(rrr(c.number(), c.clue(),c.length_spec(),c.children()) + '\n')
# down!
latex.append(r'\subsection*{Down}')
for c in down:
latex.append(rrr(c.number(), c.clue(),c.length_spec(),c.children()) + '\n')
latex.append(r'\end{multicols}') # end the multicols for the clues
if break_page: # vertically center clues if they are on a different page
latex.append(r'\vspace*{\fill}\vspace*{\fill}\vspace*{\fill}')
# crossword on the right (and end the multicol that aligns everything)
if landscape:
latex += tikz
latex.append(r'\end{multicols}')
# done! phew!
latex.append(r'\end{document}')
return '\n'.join(latex)
if __name__ == '__main__':
import sys, getopt
# options
ops,args = getopt.getopt(sys.argv[1:],'A')
# only one option though
answers = False
for op,arg in ops:
if op == '-A':
answers = True
f = sys.stdin # default to stdin
if args: # but if there are files specified, use them
f = open(args[0],'rU')
# load the clues
metadata,clues=load_clues(f)
if clues: # yep, found clues!
try:
grid = make_grid(clues)
except ValueError as e: # failed!
message("Error:", e)
sys.exit(1)
print(render_as_latex(grid,metadata,answers))
else:
message("Error: No clues found")
sys.exit(2)
|
{
"content_hash": "bcd716d229dcfaf6e78d7db849c2bcff",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 120,
"avg_line_length": 36.146596858638745,
"alnum_prop": 0.5288238702201622,
"repo_name": "huonw/crossworder",
"id": "c947090794aee828cf128677b97b489336219851",
"size": "13850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossworder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23934"
}
],
"symlink_target": ""
}
|
"""
Tests for .importer module.
"""
import os
import sys
import recursely
from tests._compat import TestCase
TESTS_DIR = os.path.dirname(__file__)
IMPORTED_DIR = os.path.join(TESTS_DIR, 'imported')
def test_imported_dir_is_not_package():
"""Make sure `imported` directory was not made into a package.
The reason it can't be a package is that nose will otherwise "collect" it
in order to look for tests inside. And collection means importing,
which we don't want to do before actual tests.
"""
imported_init_py = os.path.join(IMPORTED_DIR, '__init__.py')
assert not os.path.exists(imported_init_py)
class _RecursiveImporter(TestCase):
"""Base class for :class:`RecursiveImporter` test cases."""
@classmethod
def setUpClass(cls):
# make it possible to import from tests/imported directory
sys.path.insert(0, IMPORTED_DIR)
def tearDown(self):
"""Clean-up whatever we have made.
This is to ensure that every test begins with a clean, known state
of the interpreter's importing mechanism.
"""
# uninstall the hook, if it was ever installed.
# TODO(xion): consider exposing an ``uninstall`` function.
sys.meta_path = [ih for ih in sys.meta_path
if type(ih) is not recursely.RecursiveImporter]
# remove any of our test packages that we might have imported
for package in os.listdir(IMPORTED_DIR):
for name in list(sys.modules):
if name == package or name.startswith(package + '.'):
del sys.modules[name]
class Install(_RecursiveImporter):
"""Tests for the ``RecursiveImporter.install`` method."""
def test_required_call(self):
"""Test that the recursive importing doesn't magically kick in
without us calling the ``install`` method.
"""
import justmodules as pkg
self.assertFalse(hasattr(pkg, 'a'))
def test_retroactive__true(self):
import justmodules as pkg
recursely.install(retroactive=True)
self.assertTrue(hasattr(pkg, 'a'))
def test_retroactive__false(self):
import justmodules as pkg
recursely.install(retroactive=False)
self.assertFalse(hasattr(pkg, 'a'))
def test_duplicate_call(self):
recursely.install()
recursely.install()
recursive_importers = [ih for ih in sys.meta_path
if type(ih) is recursely.RecursiveImporter]
self.assertEquals(1, len(recursive_importers))
class Import(_RecursiveImporter):
"""Tests for the recursive importing through :class:`RecursiveImporter`."""
def setUp(self):
super(Import, self).setUp()
recursely.install()
def test_import__only_submodules(self):
"""Package with just one level of submodules."""
import justmodules as pkg
self.assertEquals(pkg.a.A, 1)
self.assertEquals(pkg.b.B, 2)
def test_import__only_subpackages(self):
"""Package with just one level of subpackages."""
import justpackages as pkg
self.assertEquals(pkg.a.A, 1)
self.assertEquals(pkg.b.B, 2)
def test_import__both__one_level(self):
"""Package with modules and packages up to one level of recursion."""
import both1level as pkg
self.assertEquals(pkg.a.A, 1)
self.assertEquals(pkg.b.B, 2)
def test_import__both__two_levels(self):
"""Package with modules and packages up to two levels of recursion."""
import both2levels as pkg
self.assertEquals(pkg.a.A, 1)
self.assertEquals(pkg.b.B, 2)
self.assertEquals(pkg.a.c.C, 3)
self.assertEquals(pkg.a.d.D, 4)
def test_import__both__three_levels(self):
"""Package with modules and packages up to three levels of recursion."""
import both3levels as pkg
self.assertEquals(pkg.a.A, 1)
self.assertEquals(pkg.b.B, 2)
self.assertEquals(pkg.a.c.C, 3)
self.assertEquals(pkg.a.d.D, 4)
self.assertEquals(pkg.a.e.E, 5)
self.assertEquals(pkg.a.c.f.F, 6)
self.assertEquals(pkg.a.c.g.G, 7)
def test_import__star(self):
"""Package with ``_recursive__ = '*'``."""
import starimport as pkg
self.assertEquals(pkg.A, 1)
self.assertEquals(pkg.B, 2)
|
{
"content_hash": "85bd27e6308f5809089fa7cd210717c5",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 80,
"avg_line_length": 33.86821705426357,
"alnum_prop": 0.6335545891508354,
"repo_name": "Xion/recursely",
"id": "681434a6750531291bd3a2e89d1153ad6ff2b429",
"size": "4369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_importer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28531"
}
],
"symlink_target": ""
}
|
"""Add section order as a template property
Revision ID: 44b77ae3ee69
Revises: 228017954653
Create Date: 2015-03-18 00:42:45.782409
"""
# revision identifiers, used by Alembic.
revision = '44b77ae3ee69'
down_revision = '228017954653'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('template_base', sa.Column('section_order', postgresql.ARRAY(sa.Integer()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('template_base', 'section_order')
### end Alembic commands ###
|
{
"content_hash": "3df67bd65d37335318a16d2ae129d221",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 109,
"avg_line_length": 27.692307692307693,
"alnum_prop": 0.7152777777777778,
"repo_name": "codeforamerica/template-maker",
"id": "7e59f73c8c7885133cb5ebd34b99efaff23ea8db",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/44b77ae3ee69_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3199"
},
{
"name": "HTML",
"bytes": "28021"
},
{
"name": "JavaScript",
"bytes": "9441"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "96038"
},
{
"name": "Shell",
"bytes": "2628"
}
],
"symlink_target": ""
}
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Ossama Edbali', 'ossedb@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'sigildb.sqlite3', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Rome'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8e^h@qy8xk*cra216+54&+$4jyc2rpk59ug!$52xo%3ue75h7&'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
LOGIN_URL = '/login/'
LOGIN_EXEMPT_URLS = (
r'^$',
r'^signup/$',
r'^documentation/developers/$',
r'^documentation/users/$',
r'^signup-success/$'
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'pybb.middleware.PybbMiddleware',
'tkit.middleware.LoginRequiredMiddleware'
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tkit.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tkit.wsgi.application'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.static',
'django.core.context_processors.i18n',
'pybb.context_processors.processor',
'django_messages.context_processors.inbox',
'sigil.context_processors.tasks_count',
'sigil.context_processors.first_five_tasks',
'sigil.context_processors.color_schema',
'sigil.context_processors.absence_limit',
'sigil.context_processors.spc_limit',
'sigil.context_processors.negative_notes_limit'
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'bootstrapform',
'pybb',
'django_messages',
'easy_pdf',
'sigil'
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
{
"content_hash": "e0683b43861945a36007cad34ca6eb50",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 127,
"avg_line_length": 33.455026455026456,
"alnum_prop": 0.6884390321050134,
"repo_name": "oss6/sigil",
"id": "3fe6dae983e817310df3d68eef1663561b57d8e2",
"size": "6359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tkit/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "241997"
},
{
"name": "JavaScript",
"bytes": "147416"
},
{
"name": "Python",
"bytes": "57687"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns
from django.conf.urls import url
from searchlight_ui.dashboards.project.search import views
urlpatterns = patterns(
'searchlight_ui.dashboards.project.search.views',
url(r'^$', views.IndexView.as_view(), name='index'),
)
|
{
"content_hash": "c01a99d80bb40a28550e4079deb83c93",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 58,
"avg_line_length": 27,
"alnum_prop": 0.7518518518518519,
"repo_name": "ttripp/searchlight-ui",
"id": "243521f428b2bd58501625525eb34fbe436f794a",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchlight_ui/dashboards/project/search/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "12333"
},
{
"name": "JavaScript",
"bytes": "83201"
},
{
"name": "Python",
"bytes": "24895"
},
{
"name": "Shell",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
"""Pytest initialization"""
from hypothesis import settings
# To really run a lot of Hypothesis:
# pytest --hypothesis-profile=crazy --hypothesis-show-statistics
settings.register_profile("crazy", settings(max_examples=100000, timeout=600))
|
{
"content_hash": "f9bb536188767eabaec1789c16c05dcc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 78,
"avg_line_length": 34.714285714285715,
"alnum_prop": 0.7818930041152263,
"repo_name": "nedbat/zellij",
"id": "dbc83c0bdaeac92d448f3de538e057fbfae8444d",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "759"
},
{
"name": "Python",
"bytes": "93643"
}
],
"symlink_target": ""
}
|
from ._security_insights import SecurityInsights
try:
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
except ImportError:
_patch_all = []
from ._patch import patch_sdk as _patch_sdk
__all__ = ["SecurityInsights"]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
{
"content_hash": "08290a61f61c9786c77baa65acae283a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 28.76923076923077,
"alnum_prop": 0.6711229946524064,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6c57b1fcf3b2e781a8d582d2bd55d8935ef1175e",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/aio/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
class Accusation:
def __init__(self, player, suspect, weapon, location):
self.player = player
self.suspect = suspect
self.weapon = weapon
self.location = location
def to_dict(self):
return {
"player": str(self.player.id),
"suspect": self.suspect,
"weapon": self.weapon,
"location": self.location
}
|
{
"content_hash": "bf5e2c3f030ee7a5c26858bc8cfc649d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6676923076923077,
"repo_name": "SArnab/JHU-605.401.82-SupaFly",
"id": "e866ddebc6ebc118a3542f5393601538140bde5d",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/clue/game/accusation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4119"
},
{
"name": "JavaScript",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "29105"
},
{
"name": "Ruby",
"bytes": "803"
},
{
"name": "TypeScript",
"bytes": "32561"
}
],
"symlink_target": ""
}
|
from astar import astar
import curses, random
DUNGEON = """
#################
#
# ###########
# #
############# # #
# # #
# # #
# ################### #
# # #
# # #
# # # #
# ############# # #
# #
############### # #
# #
# #
# #
######################
"""
HEIGHT, WIDTH = 22, 79
MAX_LIMIT = HEIGHT * WIDTH
LIMIT = MAX_LIMIT // 2
DEBUG = False
COLOR = True
class Cell(object):
def __init__(self, char):
self.char = char
self.tag = 0
self.index = 0
self.neighbors = None
class Grid(object):
def __init__(self, cells):
self.height, self.width = len(cells), len(cells[0])
self.cells = cells
def __contains__(self, pos):
y, x = pos
return 0 <= y < self.height and 0 <= x < self.width
def __getitem__(self, pos):
y, x = pos
return self.cells[y][x]
def neighbors(self, y, x):
for dy, dx in ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1),
(1, 0), (1, 1)):
if (y + dy, x + dx) in self:
yield y + dy, x + dx
def parse_grid(grid_str, width, height):
# Split the grid string into lines.
lines = [line.rstrip() for line in grid_str.splitlines()[1:]]
# Pad the top and bottom.
top = (height - len(lines)) // 2
bottom = (height - len(lines) + 1) // 2
lines = ([''] * top + lines + [''] * bottom)[:height]
# Pad the left and right sides.
max_len = max(len(line) for line in lines)
left = (width - max_len) // 2
lines = [' ' * left + line.ljust(width - left)[:width - left]
for line in lines]
# Create the grid.
cells = [[Cell(char) for char in line] for line in lines]
return Grid(cells)
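# Hedged sketch (an illustrative addition, not part of the original demo):
# a 3x3 room centred inside a 10x5 grid, showing how parse_grid pads the
# input on all four sides.
def _example_parse_tiny_grid():
    tiny = """
###
# #
###
"""
    grid = parse_grid(tiny, 10, 5)
    return grid.height, grid.width  # (5, 10)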
class Engine(object):
def __init__(self, grid):
self.grid = grid
self.y = random.randrange(self.grid.height)
self.x = random.randrange(self.grid.width)
self.goal = (random.randrange(self.grid.height),
random.randrange(self.grid.width))
self.limit = LIMIT
self.tag = 1
self.nodes = {}
self.path = []
self.dirty = True
self.debug = DEBUG
self.color = COLOR
def move_cursor(self, dy, dx):
y, x = self.y + dy, self.x + dx
if (y, x) in self.grid:
self.y, self.x = y, x
self.dirty = True
def update_path(self):
if not self.dirty:
return
self.dirty = False
self.tag += 1
def neighbors(pos):
cell = self.grid[pos]
if cell.neighbors is None:
y, x = pos
cell.neighbors = []
for neighbor_y, neighbor_x in self.grid.neighbors(y, x):
if self.grid[neighbor_y, neighbor_x].char != '#':
cell.neighbors.append((neighbor_y, neighbor_x))
return cell.neighbors
def goal(pos):
return pos == self.goal
def cost(from_pos, to_pos):
from_y, from_x = from_pos
to_y, to_x = to_pos
return 14 if to_y - from_y and to_x - from_x else 10
def estimate(pos):
y, x = pos
goal_y, goal_x = self.goal
dy, dx = abs(goal_y - y), abs(goal_x - x)
return min(dy, dx) * 14 + abs(dy - dx) * 10
def debug(nodes):
self.nodes = nodes
self.path = astar((self.y, self.x), neighbors, goal, 0, cost,
estimate, self.limit, debug)
def update_view(stdscr, engine):
# Update the grid view.
success = ((engine.y, engine.x) == engine.goal
or engine.path and engine.goal == engine.path[-1])
for y, line in enumerate(engine.grid.cells):
for x, cell in enumerate(line):
char = cell.char
color = curses.COLOR_BLUE if char == '#' else curses.COLOR_BLACK
if engine.debug:
node = engine.nodes.get((y, x))
if node is not None:
char = '.'
color = curses.COLOR_YELLOW
stdscr.addch(y, x, char, curses.color_pair(color) if engine.color
else 0)
# Update the status lines.
blocked = (engine.grid[engine.y, engine.x].char == '#')
status_1 = ['[+-] Limit = %d' % engine.limit]
if (engine.y, engine.x) != engine.goal:
status_1.append('[ENTER] Goal')
status_1.append('[SPACE] %s' % ('Unblock' if blocked else 'Block'))
status_1.append('[Q]uit')
status_2 = 'Searched %d nodes.' % len(engine.nodes)
stdscr.addstr(HEIGHT, 0, (' '.join(status_1)).ljust(WIDTH)[:WIDTH],
curses.A_STANDOUT)
stdscr.addstr(HEIGHT + 1, 0, status_2.ljust(WIDTH)[:WIDTH])
# Update the path and goal.
path_color = curses.COLOR_GREEN if success else curses.COLOR_RED
path_attr = curses.color_pair(path_color) if engine.color else 0
if engine.debug:
path_attr |= curses.A_STANDOUT
for i, pos in enumerate(engine.path):
y, x = pos
stdscr.addch(y, x, ':', path_attr)
goal_y, goal_x = engine.goal
stdscr.addch(goal_y, goal_x, '%', path_attr)
# Update the start.
if (engine.y, engine.x) == engine.goal:
char = '%'
elif engine.grid[engine.y, engine.x].char == '#':
char = '#'
else:
char = '@'
stdscr.addch(engine.y, engine.x, char)
stdscr.move(engine.y, engine.x)
def read_command(stdscr):
key = stdscr.getch()
stdscr.nodelay(True)
while True:
if stdscr.getch() == -1:
break
stdscr.nodelay(False)
return key
def handle_command(key, engine):
# Move the cursor.
if key == ord('7'): engine.move_cursor(-1, -1)
if key in (ord('8'), curses.KEY_UP): engine.move_cursor(-1, 0)
if key == ord('9'): engine.move_cursor(-1, 1)
if key in (ord('4'), curses.KEY_LEFT): engine.move_cursor( 0, -1)
if key in (ord('6'), curses.KEY_RIGHT): engine.move_cursor( 0, 1)
if key == ord('1'): engine.move_cursor( 1, -1)
if key in (ord('2'), curses.KEY_DOWN): engine.move_cursor( 1, 0)
if key == ord('3'): engine.move_cursor( 1, 1)
# Change the search limit.
if key == ord('+'):
if engine.limit < MAX_LIMIT:
engine.limit += 1
engine.dirty = True
if key == ord('-'):
if engine.limit > 0:
engine.limit -= 1
engine.dirty = True
# Insert or delete a block at the cursor.
if key == ord(' '):
cell = engine.grid[engine.y, engine.x]
cell.char = ' ' if cell.char == '#' else '#'
for y, x in engine.grid.neighbors(engine.y, engine.x):
engine.grid[y, x].neighbors = None
engine.dirty = True
if key in (ord('\n'), curses.KEY_ENTER):
if (engine.y, engine.x) != engine.goal:
engine.goal = engine.y, engine.x
engine.dirty = True
if key in (ord('d'), ord('D')):
engine.debug = not engine.debug
if key in (ord('c'), ord('C')) and COLOR:
engine.color = not engine.color
def main(stdscr):
if COLOR:
curses.use_default_colors()
for i in xrange(curses.COLOR_RED, curses.COLOR_WHITE + 1):
curses.init_pair(i, i, -1)
grid = parse_grid(DUNGEON, WIDTH, HEIGHT)
engine = Engine(grid)
while True:
engine.update_path()
update_view(stdscr, engine)
key = read_command(stdscr)
if key in (ord('q'), ord('Q')):
break
handle_command(key, engine)
if __name__ == '__main__':
curses.wrapper(main)
|
{
"content_hash": "089d9001826153d23dbd597e3362d0f5",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 77,
"avg_line_length": 32.36470588235294,
"alnum_prop": 0.47606930813037684,
"repo_name": "elemel/python-astar",
"id": "e693d73060bc7105240558cd830029f7ad75ac0a",
"size": "9345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/astar_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14917"
}
],
"symlink_target": ""
}
|
readpost = open('../used/test/post_inside.txt','r');
readcmnt = open('../used/test/cmnt_inside.txt','r');
readtran = open('../used/test/encdec_trans_inside.txt','r');
def preprocess(line):
lline = list(line.decode("utf-8"));
lline = [x for x in lline if x != u' '];
return lline
def compareSen(line1,line2):
lline1 = preprocess(line1);
lline2 = preprocess(line2);
senLen = min(len(lline1),len(lline2));
mark = True;
for i in xrange(senLen):
if lline1[i] != lline2[i]:
mark = False;
break
return mark
def main(count):
i = 0;
amount = 0;
while i < count:
line2 = readcmnt.readline();
line3 = readtran.readline();
if not line2 or not line3:
break
if compareSen(line2,line3):
amount += 1
i += 1
print "reached the end, the number of lines read is:", i
print "the total number of identical or partially identical sentences is:", amount
if __name__ == '__main__':
main(1000000);
|
{
"content_hash": "dfae4a0a2642c1d05e121ced5b1f1364",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 29.852941176470587,
"alnum_prop": 0.5714285714285714,
"repo_name": "JianboTang/modified_GroundHog",
"id": "d95672636bc131bb66d66aeb52dd351629d74a23",
"size": "1015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fork_process/dataPreprocess/result_analysis/analysis_1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8023"
},
{
"name": "PHP",
"bytes": "372"
},
{
"name": "Perl",
"bytes": "28016"
},
{
"name": "Python",
"bytes": "471663"
},
{
"name": "Shell",
"bytes": "6435"
}
],
"symlink_target": ""
}
|
""" This file configures python logging for the pytest framework
integration tests
Note: pytest must be invoked with this file in the working directory
E.G. py.test frameworks/<your-frameworks>/tests
"""
import json
import logging
import os
import os.path
import re
import retrying
import shutil
import sys
import time
import pytest
import sdk_cmd
import sdk_security
import sdk_utils
import teamcity
log_level = os.getenv('TEST_LOG_LEVEL', 'INFO').upper()
log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'EXCEPTION')
assert log_level in log_levels, \
'{} is not a valid log level. Use one of: {}'.format(log_level, ', '.join(log_levels))
# write everything to stdout due to the following circumstances:
# - shakedown uses print() aka stdout
# - teamcity splits out stdout vs stderr into separate outputs, we'd want them combined
logging.basicConfig(
format='[%(asctime)s|%(name)s|%(levelname)s]: %(message)s',
level=log_level,
stream=sys.stdout)
# reduce excessive DEBUG/INFO noise produced by some underlying libraries:
for noise_source in [
'dcos.http',
'dcos.marathon',
'dcos.util',
'paramiko.transport',
'urllib3.connectionpool']:
logging.getLogger(noise_source).setLevel('WARNING')
log = logging.getLogger(__name__)
# Regex pattern which parses the output of "dcos task log ls --long", in order to extract the filename and timestamp.
# Example inputs:
# drwxr-xr-x 6 nobody nobody 4096 Jul 21 22:07 jre1.8.0_144
# drwxr-xr-x 3 nobody nobody 4096 Jun 28 12:50 libmesos-bundle
# -rw-r--r-- 1 nobody nobody 32539549 Jan 04 16:31 libmesos-bundle-1.10-1.4-63e0814.tar.gz
# Example output:
# match.group(1): "4096 ", match.group(2): "Jul 21 22:07", match.group(3): "jre1.8.0_144 "
# Notes:
# - Should also support spaces in filenames.
# - Doesn't make any assumptions about the contents of the tokens before the timestamp/filename,
# just assumes that there are 5 of them.
# TOKENS MONTH DAY HH:MM FILENAME
task_ls_pattern = re.compile('^([^ ]+ +){5}([a-zA-Z]+ [0-9]+ [0-9:]+) +(.*)$')
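# Hedged sketch (an illustrative addition, not part of the original file):
# applying the pattern to the first example line above yields the timestamp in
# group(2) and the filename in group(3).
def _example_task_ls_match():
    line = 'drwxr-xr-x 6 nobody nobody 4096 Jul 21 22:07 jre1.8.0_144'
    match = task_ls_pattern.match(line)
    return match.group(2), match.group(3)  # ('Jul 21 22:07', 'jre1.8.0_144')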
# An arbitrary limit on the number of tasks that we fetch logs from following a failed test:
# 100 (task id limit)
# 2 (stdout + stderr file per task)
# x ~4s (time to retrieve each file)
# ---------------------
# max ~13m20s to download logs upon test failure (plus any .1/.2/.../.9 logs)
testlogs_task_id_limit = 100
# Keep track of task ids to collect logs at the correct times. Example scenario:
# 1 Test suite test_sanity_py starts with 2 tasks to ignore: [test_placement-0, test_placement-1]
# 2 test_sanity_py.health_check passes, with 3 tasks created: [test-scheduler, pod-0-task, pod-1-task]
# 3 test_sanity_py.replace_0 fails, with 1 task created: [pod-0-task-NEWUUID]
# Upon failure, the following task logs should be collected: [test-scheduler, pod-0-task, pod-1-task, pod-0-task-NEWUUID]
# 4 test_sanity_py.replace_1 succeeds, with 1 task created: [pod-1-task-NEWUUID]
# 5 test_sanity_py.restart_1 fails, with 1 new task: [pod-1-task-NEWUUID2]
# Upon failure, the following task logs should be collected: [pod-1-task-NEWUUID, pod-1-task-NEWUUID2]
# These are the tasks which were newly created following the prior failure.
# Previously-collected tasks are not collected again, even though they may have additional log content.
# In practice this is fine -- e.g. Scheduler would restart with a new task id if it was reconfigured anyway.
# The name of current test suite (e.g. 'test_sanity_py'), or an empty string if no test suite has
# started yet. This is used to determine when the test suite has changed in a test run.
testlogs_current_test_suite = ""
# The list of all task ids to ignore when fetching task logs in future test failures:
# - Task ids that already existed at the start of a test suite.
# (ignore tasks unrelated to this test suite)
# - Task ids which have been logged following a prior failure in the current test suite.
# (ignore task ids which were already collected before, even if there's new content)
testlogs_ignored_task_ids = set([])
# The index of the current test, which increases as tests are run, and resets when a new test suite
# is started. This is used to sort test logs in the order that they were executed, and is useful
# when tracing a chain of failed tests.
testlogs_test_index = 0
def get_task_ids():
""" This function uses dcos task WITHOUT the JSON options because
that can return the wrong user for schedulers
"""
tasks = sdk_cmd.run_cli('task --all', print_output=False).split('\n')
for task_str in tasks[1:]: # First line is the header line
task = task_str.split()
if len(task) < 5:
continue
yield task[4]
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
'''Hook to run after every test, before any other post-test hooks.
See also: https://docs.pytest.org/en/latest/example/simple.html\
#making-test-result-information-available-in-fixtures
'''
    # Execute all other hooks to obtain the report object, then set a report attribute for each
    # phase of a call, which can be "setup", "call", or "teardown".
    # Subsequent fixtures can get the reports off of the request object like: `request.node.rep_setup.failed`.
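    # Illustrative (hypothetical) fixture showing how such a report attribute could be consumed;
    # the fixture below is an assumption for the example and is not defined in this conftest:
    #     @pytest.fixture
    #     def log_failed_calls(request):
    #         yield
    #         if request.node.rep_call.failed:
    #             log.info('%s failed during its call phase', request.node.name)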
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
# Handle failures. Must be done here and not in a fixture in order to
# properly handle post-yield fixture teardown failures.
if rep.failed:
# Fetch all logs from tasks created since the last failure, or since the start of the suite.
global testlogs_ignored_task_ids
new_task_ids = [id for id in get_task_ids() if id not in testlogs_ignored_task_ids]
testlogs_ignored_task_ids = testlogs_ignored_task_ids.union(new_task_ids)
# Enforce limit on how many tasks we will fetch logs from, to avoid unbounded log fetching.
if len(new_task_ids) > testlogs_task_id_limit:
log.warning('Truncating list of {} new tasks to size {} to avoid fetching logs forever: {}'.format(
len(new_task_ids), testlogs_task_id_limit, new_task_ids))
del new_task_ids[testlogs_task_id_limit:]
log.info('Test {} failed in {} phase.'.format(item.name, rep.when))
try:
log.info('Fetching logs for {} tasks launched in this suite since last failure: {}'.format(
len(new_task_ids), new_task_ids))
dump_task_logs(item, new_task_ids)
except Exception:
log.exception('Task log collection failed!')
try:
log.info('Fetching mesos state')
dump_mesos_state(item)
except Exception:
log.exception('Mesos state collection failed!')
try:
log.info('Creating/fetching cluster diagnostics bundle')
get_diagnostics_bundle(item)
except Exception:
log.exception("Diagnostics bundle creation failed")
log.info('Post-failure collection complete')
def pytest_runtest_teardown(item):
'''Hook to run after every test.'''
# Inject footer at end of test, may be followed by additional teardown.
# Don't do this when running in teamcity, where it's redundant.
if not teamcity.is_running_under_teamcity():
print('''
==========
======= END: {}::{}
=========='''.format(sdk_utils.get_test_suite_name(item), item.name))
def pytest_runtest_setup(item):
'''Hook to run before every test.'''
# Inject header at start of test, following automatic "path/to/test_file.py::test_name":
# Don't do this when running in teamcity, where it's redundant.
if not teamcity.is_running_under_teamcity():
print('''
==========
======= START: {}::{}
=========='''.format(sdk_utils.get_test_suite_name(item), item.name))
# Check if we're entering a new test suite.
global testlogs_test_index
global testlogs_current_test_suite
test_suite = sdk_utils.get_test_suite_name(item)
if test_suite != testlogs_current_test_suite:
# New test suite:
# 1 Store all the task ids which already exist as of this point.
testlogs_current_test_suite = test_suite
global testlogs_ignored_task_ids
testlogs_ignored_task_ids = testlogs_ignored_task_ids.union(get_task_ids())
log.info('Entering new test suite {}: {} preexisting tasks will be ignored on test failure.'.format(
test_suite, len(testlogs_ignored_task_ids)))
# 2 Reset the test index.
testlogs_test_index = 0
# 3 Remove any prior logs for the test suite.
test_log_dir = sdk_utils.get_test_suite_log_directory(item)
if os.path.exists(test_log_dir):
log.info('Deleting existing test suite logs: {}/'.format(test_log_dir))
shutil.rmtree(test_log_dir)
# Increment the test index (to 1, if this is a new suite), and pass the value to sdk_utils for use internally.
testlogs_test_index += 1
sdk_utils.set_test_index(testlogs_test_index)
min_version_mark = item.get_marker('dcos_min_version')
if min_version_mark:
min_version = min_version_mark.args[0]
message = 'Feature only supported in DC/OS {} and up'.format(min_version)
if 'reason' in min_version_mark.kwargs:
message += ': {}'.format(min_version_mark.kwargs['reason'])
if sdk_utils.dcos_version_less_than(min_version):
pytest.skip(message)
def setup_artifact_path(item: pytest.Item, artifact_name: str):
    '''Given the pytest item and an artifact_name,
    return the path to write an artifact with that name.'''
output_dir = sdk_utils.get_test_log_directory(item)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
return os.path.join(output_dir, artifact_name)
def get_task_files_for_id(task_id: str) -> dict:
try:
ls_lines = sdk_cmd.run_cli('task ls --long --all {}'.format(task_id)).split('\n')
ret = {}
for line in ls_lines:
match = task_ls_pattern.match(line)
if not match:
log.warning('Unable to parse line: {}'.format(line))
continue
# match.group(1): "4096 ", match.group(2): "Jul 21 22:07", match.group(3): "jre1.8.0_144 "
filename = match.group(3).strip()
# build timestamp for use in output filename: 'Jul 21 22:07' => '0721_2207'
timestamp = time.strftime('%m%d_%H%M', time.strptime(match.group(2), '%b %d %H:%M'))
ret[filename] = timestamp
return ret
except:
log.exception('Failed to get list of files for task: {}'.format(task_id))
return {}
def get_task_log_for_id(task_id: str, task_file: str='stdout', lines: int=1000000) -> str:
log.info('Fetching {} from {}'.format(task_file, task_id))
rc, stdout, stderr = sdk_cmd.run_raw_cli('task log {} --all --lines {} {}'.format(task_id, lines, task_file), print_output=False)
if rc != 0:
if not stderr.startswith('No files exist. Exiting.'):
log.error('Failed to get {} task log for task_id={}: {}'.format(task_file, task_id, stderr))
return ''
return stdout
def get_rotating_task_logs(task_id: str, task_file_timestamps: dict, task_file: str):
rotated_filenames = [task_file, ]
rotated_filenames.extend(['{}.{}'.format(task_file, i) for i in range(1, 10)])
for filename in rotated_filenames:
if filename not in task_file_timestamps:
return # Reached a log index that doesn't exist, exit early
content = get_task_log_for_id(task_id, filename)
if not content:
log.error('Unable to fetch content of {} from task {}, giving up'.format(filename, task_id))
return
yield filename, task_file_timestamps[filename], content
def dump_task_logs(item: pytest.Item, task_ids: list):
for task_id in task_ids:
# Get list of available files:
task_file_timestamps = get_task_files_for_id(task_id)
for task_file in ('stdout', 'stderr'):
for log_filename, log_timestamp, log_content in get_rotating_task_logs(task_id, task_file_timestamps, task_file):
# output filename (sort by time): '0104_1709.hello-world.0fe39302-f18b-11e7-a6f9-ae11b3b25138.stdout'
out_path = setup_artifact_path(item, '{}.{}.{}'.format(log_timestamp, task_id, log_filename))
log.info('=> Writing {} ({} bytes)'.format(out_path, len(log_content)))
with open(out_path, 'w') as f:
f.write(log_content)
def dump_mesos_state(item: pytest.Item):
for name in ['state.json', 'slaves']:
r = sdk_cmd.cluster_request('GET', '/mesos/{}'.format(name), verify=False, raise_on_error=False)
if r.ok:
if name.endswith('.json'):
name = name[:-len('.json')] # avoid duplicate '.json'
with open(setup_artifact_path(item, 'mesos_{}.json'.format(name)), 'w') as f:
f.write(r.text)
def get_diagnostics_bundle(item: pytest.Item):
rc, _, _ = sdk_cmd.run_raw_cli('node diagnostics create all')
if rc:
log.error('Diagnostics bundle creation failed.')
return
@retrying.retry(
wait_fixed=5000,
stop_max_delay=10*60*1000,
retry_on_result=lambda result: result is None)
def wait_for_bundle_file():
rc, stdout, stderr = sdk_cmd.run_raw_cli('node diagnostics --status --json')
if rc:
return None
# e.g. { "some-ip": { stuff we want } }
status = next(iter(json.loads(stdout).values()))
if status['job_progress_percentage'] != 100:
return None
# e.g. "/var/lib/dcos/dcos-diagnostics/diag-bundles/bundle-2018-01-11-1515698691.zip"
return os.path.basename(status['last_bundle_dir'])
bundle_filename = wait_for_bundle_file()
if bundle_filename:
sdk_cmd.run_cli('node diagnostics download {} --location={}'.format(
bundle_filename, setup_artifact_path(item, bundle_filename)))
else:
        log.error("Diagnostics bundle didn't finish in time, giving up.")
|
{
"content_hash": "02c729f332d4268585dcf8f9909265cd",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 133,
"avg_line_length": 45.808306709265175,
"alnum_prop": 0.6466731761751988,
"repo_name": "vishnu2kmohan/dcos-commons",
"id": "003db814f1a47b36a70dfeb170a6d1ed18dfe949",
"size": "14338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "168256"
},
{
"name": "HTML",
"bytes": "99573"
},
{
"name": "Java",
"bytes": "2770769"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "457961"
},
{
"name": "Shell",
"bytes": "46736"
}
],
"symlink_target": ""
}
|
"""
@package mi.dataset.parser.test.test_adcpt_acfgm_dcl_pd0
@fid marine-integrations/mi/dataset/parser/test/test_adcpt_acfgm_dcl_pd0.py
@author Jeff Roy
@brief Test code for an adcpt_acfgm_dcl_pd0 data parser
"""
import copy
import os
from datetime import datetime
import yaml
from nose.plugins.attrib import attr
from mi.core.exceptions import RecoverableSampleException
from mi.core.log import get_logger
from mi.dataset.driver.adcpt_acfgm.dcl.pd0.resource import RESOURCE_PATH
from mi.dataset.parser.adcpt_acfgm_dcl_pd0 import AdcptAcfgmDclPd0Parser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class AdcptAcfgmPd0DclParserUnitTestCase(ParserUnitTestCase):
"""
    adcpt_acfgm_dcl_pd0 parser unit test suite
"""
def state_callback(self, state, fid_ingested):
""" Call back method to watch what comes in via the position callback """
self.state_callback_value = state
self.fid_ingested_value = fid_ingested
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config_recov = {}
self.config_telem = self.config_recov
self.fid_ingested_value = None
self.state_callback_value = None
self.publish_callback_value = None
def particle_to_yml(self, particles, filename, mode='w'):
"""
        This is added as a testing helper, not actually as part of the parser tests. Since the same particles
        will be used for the driver test, it is helpful to write them to .yml in the same form they are needed
        in the results.yml files here.
        """
        # opens for write/append; to start from scratch, manually delete this file first
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' %(i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def test_recov(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
The contents of ADCP_data_20130702.000 are the expected results
        from the IDD. The results for that record were manually verified,
        and the entire parsed particle is represented in ADCP_data_20130702.yml
"""
# ND072022.PD0 contains a single ADCPA ensemble
with open(os.path.join(RESOURCE_PATH, '20140424.adcpt.log'), 'rU') as stream_handle:
parser = AdcptAcfgmDclPd0Parser(self.config_recov,
stream_handle,
self.exception_callback,
self.state_callback,
self.publish_callback)
particles = parser.get_records(31)
log.debug('got back %d particles', len(particles))
# Note the yml file was produced from the parser output but was hand verified
# against the sample outputs provided in the IDD
self.assert_particles(particles, '20140424.recov.adcpt.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_telem(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
The contents of ADCP_data_20130702.000 are the expected results
        from the IDD. The results for that record were manually verified,
        and the entire parsed particle is represented in ADCP_data_20130702.yml
"""
# ND072022.PD0 contains a single ADCPA ensemble
with open(os.path.join(RESOURCE_PATH, '20140424.adcpt.log'), 'rb') as stream_handle:
parser = AdcptAcfgmDclPd0Parser(self.config_telem,
stream_handle,
self.exception_callback,
self.state_callback,
self.publish_callback)
particles = parser.get_records(31)
log.debug('got back %d particles', len(particles))
self.assert_particles(particles, '20140424.telem.adcpt.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists.
"""
#20140424.adcpt_BAD.log has a corrupt record in it
with open(os.path.join(RESOURCE_PATH, '20140424.adcpt_BAD.log'), 'rb') as stream_handle:
parser = AdcptAcfgmDclPd0Parser(self.config_recov,
stream_handle,
self.exception_callback,
self.state_callback,
self.publish_callback)
#try to get a particle, should get none
parser.get_records(1)
self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))
def test_live_data(self):
files_without_records = [
'20140424.adcpt_BAD.log',
'20141007.adcpt.log',
'20141008.adcpt.log',
]
for filename in os.listdir(RESOURCE_PATH):
if filename.endswith('.log'):
log.debug('Testing file: %s', filename)
with open(os.path.join(RESOURCE_PATH, filename), 'rb') as fh:
parser = AdcptAcfgmDclPd0Parser(self.config_telem,
fh,
self.exception_callback,
self.state_callback,
self.publish_callback)
particles = parser.get_records(100)
log.debug('got back %d particles', len(particles))
if filename not in files_without_records:
self.assertGreater(len(particles), 0)
def convert_yml(input_file):
earth = [
'num_cells',
'cell_length',
'bin_1_distance',
'ensemble_number',
'heading',
'pitch',
'roll',
'salinity',
'temperature',
'transducer_depth',
'pressure',
'sysconfig_vertical_orientation',
'water_velocity_east',
'water_velocity_north',
'water_velocity_up',
'error_velocity',
'water_velocity_forward',
'water_velocity_starboard',
'water_velocity_vertical',
'correlation_magnitude_beam1',
'correlation_magnitude_beam2',
'correlation_magnitude_beam3',
'correlation_magnitude_beam4',
'echo_intensity_beam1',
'echo_intensity_beam2',
'echo_intensity_beam3',
'echo_intensity_beam4',
'percent_good_3beam',
'percent_transforms_reject',
'percent_bad_beams',
'percent_good_4beam',
]
config = [
'firmware_version',
'firmware_revision',
'data_flag',
'lag_length',
'num_beams',
'num_cells',
'pings_per_ensemble',
'cell_length',
'blank_after_transmit',
'signal_processing_mode',
'low_corr_threshold',
'num_code_repetitions',
'percent_good_min',
'error_vel_threshold',
'time_per_ping_minutes',
'time_per_ping_seconds',
'heading_alignment',
'heading_bias',
'reference_layer_start',
'reference_layer_stop',
'false_target_threshold',
'low_latency_trigger',
'transmit_lag_distance',
'cpu_board_serial_number',
'system_bandwidth',
'system_power',
'serial_number',
'beam_angle',
'sysconfig_frequency',
'sysconfig_beam_pattern',
'sysconfig_sensor_config',
'sysconfig_head_attached',
'sysconfig_vertical_orientation',
'sysconfig_beam_angle',
'sysconfig_beam_config',
'coord_transform_type',
'coord_transform_tilts',
'coord_transform_beams',
'coord_transform_mapping',
'sensor_source_speed',
'sensor_source_depth',
'sensor_source_heading',
'sensor_source_pitch',
'sensor_source_roll',
'sensor_source_conductivity',
'sensor_source_temperature',
'sensor_source_temperature_eu',
'sensor_available_speed',
'sensor_available_depth',
'sensor_available_heading',
'sensor_available_pitch',
'sensor_available_roll',
'sensor_available_conductivity',
'sensor_available_temperature',
'sensor_available_temperature_eu',
]
engineering = [
'transmit_pulse_length',
'speed_of_sound',
'mpt_minutes',
'mpt_seconds',
'heading_stdev',
'pitch_stdev',
'roll_stdev',
'pressure_variance',
'adc_ambient_temp',
'adc_attitude',
'adc_attitude_temp',
'adc_contamination_sensor',
'adc_pressure_minus',
'adc_pressure_plus',
'adc_transmit_current',
'adc_transmit_voltage',
]
stream_map = {
'adcp_velocity_earth': ('VelocityEarth', earth),
'adcp_config': ('AdcpsConfig', config),
'adcp_engineering': ('AdcpsEngineering', engineering),
}
streams = [
'adcp_velocity_earth',
'adcp_config',
'adcp_engineering',
]
always = streams[:1]
last = {}
ntp_epoch = datetime(1900, 1, 1)
def create_internal_timestamp(record):
rtc = record['real_time_clock']
dts = datetime(rtc[0] + 2000, *rtc[1:-1])
print dts
rtc_time = (dts - ntp_epoch).total_seconds() + rtc[-1] / 100.0
return rtc_time
def create_particle(record, index, stream, int_ts):
klass, fields = stream_map.get(stream)
particle = {field: record.get(field) for field in fields if field in record}
particle['_index'] = index
particle['particle_object'] = klass
particle['particle_type'] = stream
particle['internal_timestamp'] = create_internal_timestamp(record)
particle['port_timestamp'] = record['internal_timestamp']
if 'time_per_ping_seconds' in fields:
seconds = particle['time_per_ping_seconds']
int_seconds = int(seconds)
hundredths = int(100 * (seconds - int_seconds))
particle['time_per_ping_hundredths'] = hundredths
particle['time_per_ping_seconds'] = int_seconds
if 'mpt_seconds' in fields:
seconds = particle['mpt_seconds']
int_seconds = int(seconds)
hundredths = int(100 * (seconds - int_seconds))
particle['mpt_hundredths'] = hundredths
particle['mpt_seconds'] = int_seconds
if stream == 'adcp_engineering':
bit_result = (
record.get('bit_result_demod_1', 0) * 0b10000 +
record.get('bit_result_demod_0', 0) * 0b1000 +
record.get('bit_result_timing', 0) * 0b10
)
particle['bit_result'] = bit_result
esw = (
record.get('bus_error_exception', 0) +
record.get('address_error_exception', 0) * 0b10 +
record.get('illegal_instruction_exception', 0) * 0b100 +
record.get('zero_divide_instruction', 0) * 0b1000 +
record.get('emulator_exception', 0) * 0b10000 +
record.get('unassigned_exception', 0) * 0b100000 +
record.get('watchdog_restart_occurred', 0) * 0b1000000 +
record.get('battery_saver_power', 0) * 0b10000000 +
record.get('pinging', 0) * (0b1 << 8) +
record.get('cold_wakeup_occurred', 0) * (0b1000000 << 8) +
record.get('unknown_wakeup_occurred', 0) * (0b10000000 << 8) +
record.get('clock_read_error', 0) * (0b1 << 16) +
record.get('unexpected_alarm', 0) * (0b10 << 16) +
record.get('clock_jump_forward', 0) * (0b100 << 16) +
record.get('clock_jump_backward', 0) * (0b1000 << 16) +
record.get('power_fail', 0) * (0b1000 << 24) +
record.get('spurious_dsp_interrupt', 0) * (0b10000 << 24) +
record.get('spurious_uart_interrupt', 0) * (0b100000 << 24) +
record.get('spurious_clock_interrupt', 0) * (0b1000000 << 24) +
record.get('level_7_interrupt', 0) * (0b10000000 << 24)
)
particle['error_status_word'] = esw
return particle
def changed(particle):
particle = copy.deepcopy(particle)
stream = particle.pop('particle_type')
particle.pop('particle_object')
particle.pop('_index')
particle.pop('internal_timestamp')
particle.pop('port_timestamp')
last_values = last.get(stream)
if last_values == particle:
return False
last[stream] = particle
return True
out_records = []
records = yaml.load(open(input_file))
index = 1
base_internal_ts = 3607286478.639999866
increment = 600
for tindex, record in enumerate(records['data']):
for stream in streams:
particle = create_particle(record, index, stream, base_internal_ts + increment * tindex)
if stream in always or changed(particle):
out_records.append(particle)
index += 1
records['data'] = out_records
yaml.dump(records, open(input_file, 'w'))
def convert_all():
yml_files = [
'20140424.recov.adcpt.yml',
'20140424.telem.adcpt.yml',
]
for f in yml_files:
convert_yml(os.path.join(RESOURCE_PATH, f))
|
{
"content_hash": "1011cdf0aa562922ff3e2576069c5a26",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 110,
"avg_line_length": 37.529262086513995,
"alnum_prop": 0.5600379686758424,
"repo_name": "renegelinas/mi-instrument",
"id": "7da12e34c69325af0ac832ae764cdad0120d2609",
"size": "14772",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/test/test_adcpt_acfgm_dcl_pd0.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10013408"
}
],
"symlink_target": ""
}
|
from functools import partial
import os
from mock import patch
from core import db
from tools import rename_checklist_type
import testing as T
class RenameTagTest(T.TestCase, T.FakeDataMixin):
checklist_keys = [ 'id', 'request', 'type', 'complete', 'target']
checklist_data = [
[1, 0, 'search', 0, 'stage'],
[2, 0, 'search', 0, 'prod'],
[3, 0, 'search-cleanup', 0, 'post-stage-verify'],
[4, 0, 'pushplans', 0, 'stage'],
[5, 0, 'pushplans-cleanup', 0, 'prod']
]
@T.setup_teardown
def setup_db(self):
self.db_file_path = T.testdb.create_temp_db_file()
T.MockedSettings['db_uri'] = T.testdb.get_temp_db_uri(self.db_file_path)
with patch.dict(db.Settings, T.MockedSettings):
db.init_db()
self.insert_checklists()
yield
db.finalize_db()
os.unlink(self.db_file_path)
def check_db_results(self, success, db_results):
if not success:
raise db.DatabaseError()
def verify_database_state(self, data, success, db_results):
self.check_db_results(success, db_results)
# id, push, *type*, status, target
data_types = [d[2] for d in data]
# id, push, *type*, status, target
types = [result[2] for result in db_results.fetchall()]
T.assert_sorted_equal(data_types, types)
def verify_type_rename(self, oldtype, newtype, success, db_results):
self.check_db_results(success, db_results)
# id, push, *type*, status, target
types = [result[2] for result in db_results.fetchall()]
T.assert_not_in(oldtype, types)
T.assert_not_in('%s-cleanup' % oldtype, types)
T.assert_in('%s' % newtype, types)
T.assert_in('%s-cleanup' % newtype, types)
def make_checklist_dict(self, data):
return dict(zip(self.checklist_keys, data))
def insert_checklists(self):
checklist_queries = []
for cl in self.checklist_data:
checklist_queries.append(db.push_checklist.insert(self.make_checklist_dict(cl)))
db.execute_transaction_cb(checklist_queries, self.on_db_return)
@patch('tools.rename_checklist_type.convert_checklist')
@patch('optparse.OptionParser.error')
@patch('optparse.OptionParser.parse_args', return_value=[None, []])
def test_main_noargs(self, parser, error, convert_checklist):
rename_checklist_type.main()
T.assert_equal(False, convert_checklist.called)
error.assert_called_once_with('Incorrect number of arguments')
@patch('tools.rename_checklist_type.convert_checklist')
@patch('optparse.OptionParser.error')
@patch('optparse.OptionParser.parse_args',
return_value=[None, ['oldtag', 'newtag']])
def test_main_twoargs(self, parser, error, convert_checklist):
parser.return_value=[None, ['oldtag', 'newtag']]
rename_checklist_type.main()
convert_checklist.assert_called_once_with('oldtag', 'newtag')
T.assert_equal(False, error.called)
def test_convert_cleanup_type(self):
rename_checklist_type.convert_checklist('search', 'not_search')
cb = partial(self.verify_type_rename, 'search', 'not_search')
db.execute_cb(db.push_checklist.select(), cb)
def test_convert_notype(self):
rename_checklist_type.convert_checklist('nonexistent', 'random')
cb = partial(self.verify_database_state, self.checklist_data)
db.execute_cb(db.push_checklist.select(), cb)
if __name__ == '__main__':
T.run()
|
{
"content_hash": "fed96eef59cdd6e8ad38fefcdf866f17",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 92,
"avg_line_length": 36.06060606060606,
"alnum_prop": 0.630532212885154,
"repo_name": "bis12/pushmanager",
"id": "5a4025adf1ca24156c0b99f47fc04370acb9d283",
"size": "3594",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/test_rename_checklist_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15229"
},
{
"name": "JavaScript",
"bytes": "31706"
},
{
"name": "Python",
"bytes": "250937"
},
{
"name": "Shell",
"bytes": "1214"
}
],
"symlink_target": ""
}
|
class ManagedObjectRepo(object):
"""Stores and manages KMIP managed objects.
The KMIP specification details the managed objects that are stored by a
KMIP server. This repository abstraction is an interface for KMIP servers
to store managed objects.
"""
def __init__(self):
pass
def save(self, managed_object, attributes):
"""Save a managed object
This saves a managed object into the repository and returns a UUID
string that can be used to reference the object in the repository.
:param managed_object: managed object to save from secrets.py
:param attributes: attributes to store with the managed object
:returns: a UUID string that can be used to retrieve the object later
"""
raise NotImplementedError
def get(self, uuid):
"""Retrieve a managed object
Retrieve a managed object from the repository. The UUID is used to
identify the managed object to return. The UUID is returned from the
save call.
A tuple is returned that contains the managed object and all of its
attributes.
:param uuid: UUID of the managed object
:returns: (managed_object, attributes) if object exists, otherwise
(None, None)
"""
raise NotImplementedError
def update(self, uuid, managed_object, attributes):
"""Updates a managed object
Updates the values for a managed_object.
:param uuid: UUID of the managed object
:param managed_object: managed object
:param attributes: attributes to store with the managed object
:returns: True if object existed and successfully updated, otherwise
False
"""
raise NotImplementedError
def delete(self, uuid):
"""Delete a managed object from the repository
Delete a managed object from the repository.
:param uuid: UUID of the managed object
:returns: True if successfully deleted, False if not found
"""
raise NotImplementedError
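# Illustrative sketch only (not part of PyKMIP): a minimal in-memory repository
# showing how the interface above might be satisfied. The class and attribute
# names below are assumptions made for this example; a real KMIP server would
# persist objects durably rather than in a dict.
import uuid as _uuid
class InMemoryManagedObjectRepo(ManagedObjectRepo):
    """Dict-backed ManagedObjectRepo sketch, keyed by generated UUID strings."""
    def __init__(self):
        super(InMemoryManagedObjectRepo, self).__init__()
        self._store = {}
    def save(self, managed_object, attributes):
        # Generate a new UUID string and remember the object/attribute pair.
        uid = str(_uuid.uuid4())
        self._store[uid] = (managed_object, attributes)
        return uid
    def get(self, uuid):
        # Return (None, None) when the UUID is unknown, per the interface docs.
        return self._store.get(uuid, (None, None))
    def update(self, uuid, managed_object, attributes):
        if uuid not in self._store:
            return False
        self._store[uuid] = (managed_object, attributes)
        return True
    def delete(self, uuid):
        # pop() returns None when missing, so this maps to the documented bool.
        return self._store.pop(uuid, None) is not None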
|
{
"content_hash": "27bc9db290ed6b14b9f06753b204fb14",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 36.421052631578945,
"alnum_prop": 0.6666666666666666,
"repo_name": "viktorTarasov/PyKMIP",
"id": "7b075c163a5bc8c9c60a5cbe0e2c157eb30692e7",
"size": "2723",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kmip/services/server/repo/repo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1734599"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import sys
sys.path.append("..")
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
create_test_class,
get_xpu_op_support_types,
XPUOpTestWrapper,
)
paddle.enable_static()
class XPUTestSignOP(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'sign'
self.use_dynamic_create_class = False
class TestSignOPBase(XPUOpTest):
def setUp(self):
self.place = paddle.XPUPlace(0)
self.init_dtype()
self.set_case()
def set_case(self):
self.op_type = 'sign'
self.dtype = self.in_type
self.init_config()
self.x = np.random.uniform(-10, 10, self.input_shape).astype(
self.dtype
)
self.inputs = {'X': self.x}
self.outputs = {'Out': np.sign(self.x)}
self.attrs = {'use_xpu': True}
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def init_config(self):
self.input_shape = [864]
class XPUTestSign1(TestSignOPBase):
def init_config(self):
self.input_shape = [2, 768]
class XPUTestSign2(TestSignOPBase):
def init_config(self):
self.input_shape = [3, 8, 4096]
class XPUTestSign3(TestSignOPBase):
def init_config(self):
self.input_shape = [1024]
class XPUTestSign4(TestSignOPBase):
def init_config(self):
self.input_shape = [2, 2, 255]
support_types = get_xpu_op_support_types('sign')
for stype in support_types:
create_test_class(globals(), XPUTestSignOP, stype)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "0b43d37fb23f9358b21875d0f176662f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 25.453333333333333,
"alnum_prop": 0.5788370874803562,
"repo_name": "luotao1/Paddle",
"id": "b498c5fc3a17e59819715939bd7ae19a7918c9ec",
"size": "2522",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/xpu/test_sign_op_xpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
import io
import httplib
import time
import ConfigParser
import math
import json
agentStep = []
inside = []
firstStep = []
timeStart = []
timeStop = []
typeCharacterDict = {}
amountLeaders = None
behaviors = False
results = io.open('SmartSimResults', 'wb')
simulationStarted = False
config = ConfigParser.RawConfigParser()
config.read('SmartSimSettings.cfg')
amountAgents = config.getint('Settings', 'amountAgents')
ubikSimServer = config.get('Settings', 'ubikSimServer')
meshScenario = config.get('Settings', 'meshScenario')
modeSimulation = config.get('Settings', 'modeSimulation')
typesCharacters = config.options('Types')
for typeCharacter in typesCharacters:
charactersInType = config.get('Types',typeCharacter)
charactersInType = [int(n) for n in charactersInType.split(',')]
typeCharacterDict[typeCharacter] = charactersInType
print "|--------------------------------------------|"
print "| Starting SmartSim |"
print "|--------------------------------------------|"
print ""
scene.addAssetPath('script', 'scripts')
scene.loadAssets()
scene.run('configureModule.py')
scene.run('agentsCreationModule.py')
scene.run('tools.py')
scene.run('connectionsModule.py')
scene.run('scenarioModule.py')
scene.run('locomotionModule.py')
print "Initiating Scene"
ConfigureModule().init()
print "Initiating global variables"
ConfigureModule().initGlobalVariables(amountAgents)
print "Setting scene limits"
vectorLimits = ConfigureModule().autoSetLimits(ubikSimServer)
xLimitScene = vectorLimits[0]
yLimitScene = vectorLimits[1]
ConfigureModule().setLimits(xLimitScene, yLimitScene)
print "Setting scenario"
ScenarioModule().addScenario(meshScenario)
ScenarioModule().addEmergency(ubikSimServer)
print "Creating agents"
AgentsCreationModule().settleAgents(amountAgents, ubikSimServer)
print "Configuring camera settings"
cameraCenter=ConfigureModule().autoSetCamera(amountAgents)
cameraEye = [cameraCenter[0], 30, cameraCenter[2]+40]
cameraPosition = cameraEye
ConfigureModule().camera(cameraEye, cameraCenter, cameraPosition)
print "Getting routes"
ConnectionsModule().initSteps(amountAgents)
print 'Steering'
steerManager = scene.getSteerManager()
steerManager.setEnable(False)
steerManager.setEnable(True)
print "Scene settled"
def play():
LocomotionModule().playSimulation()
def stop():
LocomotionModule().finishSimulation()
def createAgent(name, x, y, *characterType):
for character in characterType:
characterType = character
AgentsCreationModule().addAgent(name, SrVec(x, 0, y), characterType)
scene.removeScript('locomotion')
locomotion = LocomotionModule()
scene.addScript('locomotion', locomotion)
|
{
"content_hash": "faf684815678bf9d9c5d2e9d95973bac",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 69,
"avg_line_length": 26.386138613861387,
"alnum_prop": 0.750093808630394,
"repo_name": "gsi-upm/SmartSim",
"id": "a747be9b0a4f04409625462b5bfdac486160a5cd",
"size": "2666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/SmartSimCharacterTypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11708"
},
{
"name": "C",
"bytes": "941473"
},
{
"name": "C#",
"bytes": "733730"
},
{
"name": "C++",
"bytes": "16389947"
},
{
"name": "CMake",
"bytes": "114424"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "45459"
},
{
"name": "Groff",
"bytes": "2619"
},
{
"name": "HTML",
"bytes": "1128698"
},
{
"name": "Inno Setup",
"bytes": "8592"
},
{
"name": "Java",
"bytes": "371478"
},
{
"name": "M4",
"bytes": "16806"
},
{
"name": "Makefile",
"bytes": "240549"
},
{
"name": "Objective-C",
"bytes": "4511"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13551"
},
{
"name": "Protocol Buffer",
"bytes": "3178"
},
{
"name": "Python",
"bytes": "989019"
},
{
"name": "Rust",
"bytes": "105"
},
{
"name": "Shell",
"bytes": "248995"
},
{
"name": "Smalltalk",
"bytes": "1540"
},
{
"name": "Smarty",
"bytes": "179"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
}
|
import math
from transform import Transform
class Rotation(Transform):
def __init__(self, degree):
Transform.__init__(self)
degree = math.radians(degree)
self.trans_matrix = [[math.cos(degree), math.sin(degree)*-1.0, 0], [math.sin(degree), math.cos(degree), 0], [0, 0, 1]]
print self.trans_matrix
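# Worked example (illustrative, not from the original file): for degree = 90 the
# matrix above evaluates to [[0, -1, 0], [1, 0, 0], [0, 0, 1]], so the homogeneous
# point [1, 0, 1] (the point (1, 0)) is mapped to [0, 1, 1], a counter-clockwise
# rotation onto (0, 1).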
|
{
"content_hash": "319801930567cf92c1fc08fa28614a41",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 126,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.6374622356495468,
"repo_name": "germanogesser/computacao_grafica",
"id": "06e75278c3eff49b17a4615d422ec64b3a4d4d7a",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transform/rotation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "61661"
}
],
"symlink_target": ""
}
|
"""
===========================================================================
Visualising statistical significance thresholds on EEG data
===========================================================================
MNE-Python provides a range of tools for statistical hypothesis testing
and the visualisation of the results. Here, we show a few options for
exploratory and confirmatory tests - e.g., targeted t-tests, cluster-based
permutation approaches (here with Threshold-Free Cluster Enhancement);
and how to visualise the results.
The underlying data comes from [1]_; we contrast long vs. short words.
TFCE is described in [2]_.
References
----------
.. [1] Dufau, S., Grainger, J., Midgley, KJ., Holcomb, PJ. A thousand
words are worth a picture: Snapshots of printed-word processing in an
event-related potential megastudy. Psychological Science, 2015
.. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
import mne
from mne.channels import find_ch_connectivity, make_1020_channel_selections
from mne.stats import spatio_temporal_cluster_test
np.random.seed(0)
# Load the data
path = mne.datasets.kiloword.data_path() + '/kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
name = "NumberOfLetters"
# Split up the data by the median length in letters via the attached metadata
median_value = str(epochs.metadata[name].median())
long_words = epochs[name + " > " + median_value]
short_words = epochs[name + " < " + median_value]
#############################################################################
# If we have a specific point in space and time we wish to test, it can be
# convenient to convert the data into Pandas Dataframe format. In this case,
# the :class:`mne.Epochs` object has a convenient
# :meth:`mne.Epochs.to_data_frame` method, which returns a dataframe.
# This dataframe can then be queried for specific time windows and sensors.
# The extracted data can be submitted to standard statistical tests. Here,
# we conduct t-tests on the difference between long and short words.
time_windows = ((.2, .25), (.35, .45))
elecs = ["Fz", "Cz", "Pz"]
# display the EEG data in Pandas format (first 5 rows)
print(epochs.to_data_frame()[elecs].head())
report = "{elec}, time: {tmin}-{tmax} s; t({df})={t_val:.3f}, p={p:.3f}"
print("\nTargeted statistical test results:")
for (tmin, tmax) in time_windows:
long_df = long_words.copy().crop(tmin, tmax).to_data_frame()
short_df = short_words.copy().crop(tmin, tmax).to_data_frame()
for elec in elecs:
# extract data
A = long_df[elec].groupby("condition").mean()
B = short_df[elec].groupby("condition").mean()
# conduct t test
t, p = ttest_ind(A, B)
# display results
format_dict = dict(elec=elec, tmin=tmin, tmax=tmax,
df=len(epochs.events) - 2, t_val=t, p=p)
print(report.format(**format_dict))
##############################################################################
# Absent specific hypotheses, we can also conduct an exploratory
# mass-univariate analysis at all sensors and time points. This requires
# correcting for multiple tests.
# MNE offers various methods for this; amongst them, cluster-based permutation
# methods allow deriving power from the spatio-temporal correlation structure
# of the data. Here, we use TFCE.
# Calculate statistical thresholds
con = find_ch_connectivity(epochs.info, "eeg")
# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [long_words.get_data().transpose(0, 2, 1),
short_words.get_data().transpose(0, 2, 1)]
tfce = dict(start=.2, step=.2)
t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
X, tfce, n_permutations=100) # a more standard number would be 1000+
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")
##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.
# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked([long_words.average(), -short_words.average()],
weights='equal') # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
topomap_args=time_unit) # show difference wave
# Create ROIs by checking channel labels
selections = make_1020_channel_selections(evoked.info, midline="12z")
# Visualize the results
fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
evoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,
mask=significant_points, show_names="all", titles=None,
**time_unit)
plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=.3,
label="uV")
plt.show()
|
{
"content_hash": "a34375eadee38d9a93ba8a8dcf59251e",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 43.55118110236221,
"alnum_prop": 0.6588320376062194,
"repo_name": "adykstra/mne-python",
"id": "e47fe2afc614192dc8ffc72507f19209ffd154c6",
"size": "5531",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tutorials/stats-sensor-space/plot_stats_cluster_erp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6001033"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
import datetime
import pprint
import inspect
from ...vendor.Qt import QtWidgets, QtCore
from ...vendor import qtawesome
from ... import io
from ... import api
from ... import pipeline
from .model import SubsetsModel, FamiliesFilterProxyModel
from .delegates import PrettyTimeDelegate, VersionDelegate
from . import lib
class SubsetWidget(QtWidgets.QWidget):
"""A widget that lists the published subsets for an asset"""
active_changed = QtCore.Signal() # active index changed
version_changed = QtCore.Signal() # version state changed for a subset
def __init__(self, parent=None):
super(SubsetWidget, self).__init__(parent=parent)
model = SubsetsModel()
proxy = QtCore.QSortFilterProxyModel()
family_proxy = FamiliesFilterProxyModel()
family_proxy.setSourceModel(proxy)
filter = QtWidgets.QLineEdit()
filter.setPlaceholderText("Filter subsets..")
view = QtWidgets.QTreeView()
view.setIndentation(5)
view.setStyleSheet("""
QTreeView::item{
padding: 5px 1px;
border: 0px;
}
""")
view.setAllColumnsShowFocus(True)
# Set view delegates
version_delegate = VersionDelegate()
column = model.COLUMNS.index("version")
view.setItemDelegateForColumn(column, version_delegate)
time_delegate = PrettyTimeDelegate()
column = model.COLUMNS.index("time")
view.setItemDelegateForColumn(column, time_delegate)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(filter)
layout.addWidget(view)
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
view.setSortingEnabled(True)
view.sortByColumn(1, QtCore.Qt.AscendingOrder)
view.setAlternatingRowColors(True)
self.data = {
"delegates": {
"version": version_delegate,
"time": time_delegate
}
}
self.proxy = proxy
self.model = model
self.view = view
self.filter = filter
self.family_proxy = family_proxy
# settings and connections
self.proxy.setSourceModel(self.model)
self.proxy.setDynamicSortFilter(True)
self.proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.view.setModel(self.family_proxy)
self.view.customContextMenuRequested.connect(self.on_context_menu)
selection = view.selectionModel()
selection.selectionChanged.connect(self.active_changed)
self.filter.textChanged.connect(self.proxy.setFilterRegExp)
self.model.refresh()
# Expose this from the widget as a method
self.set_family_filters = self.family_proxy.setFamiliesFilter
def on_context_menu(self, point):
point_index = self.view.indexAt(point)
if not point_index.isValid():
return
        # Get all representation->loader combinations available for the
        # index under the cursor, so we can present the options to the user.
available_loaders = api.discover(api.Loader)
loaders = list()
node = point_index.data(self.model.NodeRole)
version_id = node['version_document']['_id']
representations = io.find({"type": "representation",
"parent": version_id})
for representation in representations:
for loader in api.loaders_from_representation(
available_loaders,
representation['_id']
):
loaders.append((representation, loader))
if not loaders:
# no loaders available
self.echo("No compatible loaders available for this version.")
return
def sorter(value):
"""Sort the Loaders by their order and then their name"""
Plugin = value[1]
return Plugin.order, Plugin.__name__
# List the available loaders
menu = QtWidgets.QMenu(self)
for representation, loader in sorted(loaders, key=sorter):
# Label
label = getattr(loader, "label", None)
if label is None:
label = loader.__name__
# Add the representation as suffix
label = "{0} ({1})".format(label, representation['name'])
action = QtWidgets.QAction(label, menu)
action.setData((representation, loader))
# Add tooltip and statustip from Loader docstring
tip = inspect.getdoc(loader)
if tip:
action.setToolTip(tip)
action.setStatusTip(tip)
# Support font-awesome icons using the `.icon` and `.color`
# attributes on plug-ins.
icon = getattr(loader, "icon", None)
if icon is not None:
try:
key = "fa.{0}".format(icon)
color = getattr(loader, "color", "white")
action.setIcon(qtawesome.icon(key, color=color))
except Exception as e:
print("Unable to set icon for loader "
"{}: {}".format(loader, e))
menu.addAction(action)
# Show the context action menu
global_point = self.view.mapToGlobal(point)
action = menu.exec_(global_point)
if not action:
return
# Find the representation name and loader to trigger
action_representation, loader = action.data()
representation_name = action_representation['name'] # extension
# Run the loader for all selected indices, for those that have the
# same representation available
selection = self.view.selectionModel()
rows = selection.selectedRows(column=0)
        # Ensure the active point index also refers to the first column so it
        # can be matched against the entries in the rows list.
point_index = point_index.sibling(point_index.row(), 0)
# Ensure point index is run first.
try:
rows.remove(point_index)
except ValueError:
pass
rows.insert(0, point_index)
# Trigger
for row in rows:
node = row.data(self.model.NodeRole)
version_id = node['version_document']['_id']
representation = io.find_one({"type": "representation",
"name": representation_name,
"parent": version_id})
if not representation:
self.echo("Subset '{}' has no representation '{}'".format(
node['subset'],
representation_name
))
continue
try:
api.load(Loader=loader, representation=representation['_id'])
except pipeline.IncompatibleLoaderError as exc:
self.echo(exc)
continue
def echo(self, message):
print(message)
class VersionTextEdit(QtWidgets.QTextEdit):
"""QTextEdit that displays version specific information.
This also overrides the context menu to add actions like copying
source path to clipboard or copying the raw data of the version
to clipboard.
"""
def __init__(self, parent=None):
super(VersionTextEdit, self).__init__(parent=parent)
self.data = {
"source": None,
"raw": None
}
# Reset
self.set_version(None)
def set_version(self, version_id):
if not version_id:
# Reset state to empty
self.data = {
"source": None,
"raw": None,
}
self.setText("")
self.setEnabled(True)
return
self.setEnabled(True)
version = io.find_one({"_id": version_id, "type": "version"})
assert version, "Not a valid version id"
subset = io.find_one({"_id": version['parent'], "type": "subset"})
assert subset, "No valid subset parent for version"
# Define readable creation timestamp
created = version["data"]["time"]
created = datetime.datetime.strptime(created, "%Y%m%dT%H%M%SZ")
created = datetime.datetime.strftime(created, "%b %d %Y %H:%M")
comment = version['data'].get("comment", None) or "No comment"
source = version['data'].get("source", None)
source_label = source if source else "No source"
# Store source and raw data
self.data['source'] = source
self.data['raw'] = version
data = {
"subset": subset['name'],
"version": version['name'],
"comment": comment,
"created": created,
"source": source_label
}
self.setHtml("""
<h3>{subset} v{version:03d}</h3>
<b>Comment</b><br>
{comment}<br>
<br>
<b>Created</b><br>
{created}<br>
<br>
<b>Source</b><br>
{source}<br>""".format(**data))
def contextMenuEvent(self, event):
"""Context menu with additional actions"""
menu = self.createStandardContextMenu()
        # Only add the additional actions when there is any text, so we can
        # assume a version is set.
if self.toPlainText().strip():
menu.addSeparator()
action = QtWidgets.QAction("Copy source path to clipboard",
menu)
action.triggered.connect(self.on_copy_source)
menu.addAction(action)
action = QtWidgets.QAction("Copy raw data to clipboard",
menu)
action.triggered.connect(self.on_copy_raw)
menu.addAction(action)
menu.exec_(event.globalPos())
del menu
def on_copy_source(self):
"""Copy formatted source path to clipboard"""
source = self.data.get("source", None)
if not source:
return
path = source.format(root=api.registered_root())
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(path)
def on_copy_raw(self):
"""Copy raw version data to clipboard
The data is string formatted with `pprint.pformat`.
"""
raw = self.data.get("raw", None)
if not raw:
return
raw_text = pprint.pformat(raw)
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(raw_text)
class VersionWidget(QtWidgets.QWidget):
"""A Widget that display information about a specific version"""
def __init__(self, parent=None):
super(VersionWidget, self).__init__(parent=parent)
layout = QtWidgets.QVBoxLayout(self)
label = QtWidgets.QLabel("Version")
data = VersionTextEdit()
data.setReadOnly(True)
layout.addWidget(label)
layout.addWidget(data)
self.data = data
def set_version(self, version_id):
self.data.set_version(version_id)
class FamilyListWidget(QtWidgets.QListWidget):
"""A Widget that lists all available families"""
NameRole = QtCore.Qt.UserRole + 1
active_changed = QtCore.Signal(list)
def __init__(self, parent=None):
super(FamilyListWidget, self).__init__(parent=parent)
multi_select = QtWidgets.QAbstractItemView.ExtendedSelection
self.setSelectionMode(multi_select)
self.setAlternatingRowColors(True)
# Enable RMB menu
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.show_right_mouse_menu)
self.itemChanged.connect(self._on_item_changed)
def refresh(self):
"""Refresh the listed families.
This gets all unique families and adds them as checkable items to
the list.
"""
family = io.distinct("data.family")
families = io.distinct("data.families")
unique_families = list(set(family + families))
# Rebuild list
self.blockSignals(True)
self.clear()
for name in sorted(unique_families):
family = lib.get(lib.FAMILY_CONFIG, name)
label = family.get("label", name)
icon = family.get("icon", None)
            # TODO: This should be more manageable by the artist
# Temporarily implement support for a default state in the project
# configuration
state = family.get("state", True)
state = QtCore.Qt.Checked if state else QtCore.Qt.Unchecked
item = QtWidgets.QListWidgetItem(parent=self)
item.setText(label)
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setData(self.NameRole, name)
item.setCheckState(state)
if icon:
item.setIcon(icon)
self.addItem(item)
self.blockSignals(False)
self.active_changed.emit(self.get_filters())
def get_filters(self):
"""Return the checked family items"""
items = [self.item(i) for i in
range(self.count())]
return [item.data(self.NameRole) for item in items if
item.checkState() == QtCore.Qt.Checked]
def _on_item_changed(self):
self.active_changed.emit(self.get_filters())
def _set_checkstate_all(self, state):
_state = QtCore.Qt.Checked if state is True else QtCore.Qt.Unchecked
self.blockSignals(True)
for i in range(self.count()):
item = self.item(i)
item.setCheckState(_state)
self.blockSignals(False)
self.active_changed.emit(self.get_filters())
def show_right_mouse_menu(self, pos):
"""Build RMB menu under mouse at current position (within widget)"""
# Get mouse position
globalpos = self.viewport().mapToGlobal(pos)
menu = QtWidgets.QMenu(self)
# Add enable all action
state_checked = QtWidgets.QAction(menu, text="Enable All")
state_checked.triggered.connect(
lambda: self._set_checkstate_all(True))
# Add disable all action
state_unchecked = QtWidgets.QAction(menu, text="Disable All")
state_unchecked.triggered.connect(
lambda: self._set_checkstate_all(False))
menu.addAction(state_checked)
menu.addAction(state_unchecked)
menu.exec_(globalpos)
|
{
"content_hash": "3f7afc7d15eefdee110edb80c1cfaf1d",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 78,
"avg_line_length": 32.542410714285715,
"alnum_prop": 0.5899581589958159,
"repo_name": "MoonShineVFX/core",
"id": "a8da674f588398c14d0e4775e1bc1cf483b989fa",
"size": "14579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalon/tools/cbloader/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "582724"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.web.vhost}.
"""
from __future__ import absolute_import, division
from twisted.internet.defer import gatherResults
from twisted.trial.unittest import TestCase
from twisted.web.http import NOT_FOUND
from twisted.web.resource import NoResource
from twisted.web.static import Data
from twisted.web.server import Site
from twisted.web.vhost import (_HostResource,
NameVirtualHost,
VHostMonsterResource)
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class HostResourceTests(TestCase):
"""
Tests for L{_HostResource}.
"""
def test_getChild(self):
"""
L{_HostResource.getChild} returns the proper I{Resource} for the vhost
embedded in the URL. Verify that returning the proper I{Resource}
required changing the I{Host} in the header.
"""
bazroot = Data(b'root data', "")
bazuri = Data(b'uri data', "")
baztest = Data(b'test data', "")
bazuri.putChild(b'test', baztest)
bazroot.putChild(b'uri', bazuri)
hr = _HostResource()
root = NameVirtualHost()
root.default = Data(b'default data', "")
root.addHost(b'baz.com', bazroot)
request = DummyRequest([b'uri', b'test'])
request.prepath = [b'bar', b'http', b'baz.com']
request.site = Site(root)
request.isSecure = lambda: False
request.host = b''
step = hr.getChild(b'baz.com', request) # Consumes rest of path
self.assertIsInstance(step, Data)
request = DummyRequest([b'uri', b'test'])
step = root.getChild(b'uri', request)
self.assertIsInstance(step, NoResource)
class NameVirtualHostTests(TestCase):
"""
Tests for L{NameVirtualHost}.
"""
def test_renderWithoutHost(self):
"""
L{NameVirtualHost.render} returns the result of rendering the
instance's C{default} if it is not C{None} and there is no I{Host}
header in the request.
"""
virtualHostResource = NameVirtualHost()
virtualHostResource.default = Data(b"correct result", "")
request = DummyRequest([''])
self.assertEqual(
virtualHostResource.render(request), b"correct result")
def test_renderWithoutHostNoDefault(self):
"""
L{NameVirtualHost.render} returns a response with a status of I{NOT
FOUND} if the instance's C{default} is C{None} and there is no I{Host}
header in the request.
"""
virtualHostResource = NameVirtualHost()
request = DummyRequest([''])
d = _render(virtualHostResource, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, NOT_FOUND)
d.addCallback(cbRendered)
return d
def test_renderWithHost(self):
"""
L{NameVirtualHost.render} returns the result of rendering the resource
which is the value in the instance's C{host} dictionary corresponding
to the key indicated by the value of the I{Host} header in the request.
"""
virtualHostResource = NameVirtualHost()
virtualHostResource.addHost(b'example.org', Data(b"winner", ""))
request = DummyRequest([b''])
request.requestHeaders.addRawHeader(b'host', b'example.org')
d = _render(virtualHostResource, request)
def cbRendered(ignored, request):
self.assertEqual(b''.join(request.written), b"winner")
d.addCallback(cbRendered, request)
# The port portion of the Host header should not be considered.
requestWithPort = DummyRequest([b''])
requestWithPort.requestHeaders.addRawHeader(b'host', b'example.org:8000')
dWithPort = _render(virtualHostResource, requestWithPort)
def cbRendered(ignored, requestWithPort):
self.assertEqual(b''.join(requestWithPort.written), b"winner")
dWithPort.addCallback(cbRendered, requestWithPort)
return gatherResults([d, dWithPort])
def test_renderWithUnknownHost(self):
"""
L{NameVirtualHost.render} returns the result of rendering the
instance's C{default} if it is not C{None} and there is no host
matching the value of the I{Host} header in the request.
"""
virtualHostResource = NameVirtualHost()
virtualHostResource.default = Data(b"correct data", "")
request = DummyRequest([b''])
request.requestHeaders.addRawHeader(b'host', b'example.com')
d = _render(virtualHostResource, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b"correct data")
d.addCallback(cbRendered)
return d
def test_renderWithUnknownHostNoDefault(self):
"""
L{NameVirtualHost.render} returns a response with a status of I{NOT
FOUND} if the instance's C{default} is C{None} and there is no host
matching the value of the I{Host} header in the request.
"""
virtualHostResource = NameVirtualHost()
request = DummyRequest([''])
request.requestHeaders.addRawHeader(b'host', b'example.com')
d = _render(virtualHostResource, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, NOT_FOUND)
d.addCallback(cbRendered)
return d
def test_getChild(self):
"""
L{NameVirtualHost.getChild} returns correct I{Resource} based off
the header and modifies I{Request} to ensure proper prepath and
postpath are set.
"""
virtualHostResource = NameVirtualHost()
leafResource = Data(b"leaf data", "")
leafResource.isLeaf = True
normResource = Data(b"norm data", "")
virtualHostResource.addHost(b'leaf.example.org', leafResource)
virtualHostResource.addHost(b'norm.example.org', normResource)
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'host', b'norm.example.org')
request.prepath = [b'']
self.assertIsInstance(virtualHostResource.getChild(b'', request),
NoResource)
self.assertEqual(request.prepath, [b''])
self.assertEqual(request.postpath, [])
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'host', b'leaf.example.org')
request.prepath = [b'']
self.assertIsInstance(virtualHostResource.getChild(b'', request),
Data)
self.assertEqual(request.prepath, [])
self.assertEqual(request.postpath, [b''])
class VHostMonsterResourceTests(TestCase):
"""
Tests for L{VHostMonsterResource}.
"""
def test_getChild(self):
"""
L{VHostMonsterResource.getChild} returns I{_HostResource} and modifies
I{Request} with correct L{Request.isSecure}.
"""
vhm = VHostMonsterResource()
request = DummyRequest([])
self.assertIsInstance(vhm.getChild(b'http', request), _HostResource)
self.assertFalse(request.isSecure())
request = DummyRequest([])
self.assertIsInstance(vhm.getChild(b'https', request), _HostResource)
self.assertTrue(request.isSecure())
|
{
"content_hash": "9cba4f814bfd385bc75db0e3316b904b",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 81,
"avg_line_length": 37.18274111675127,
"alnum_prop": 0.6386348122866894,
"repo_name": "ArcherSys/ArcherSys",
"id": "d4d9852809bdca01a3437be59ae23287f8d0a0dc",
"size": "7398",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/site-packages/twisted/web/test/test_vhost.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0);
|
{
"content_hash": "062297f829458e207c2e59171fef4ac8",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 162,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.7022900763358778,
"repo_name": "antoinecarme/pyaf",
"id": "b25099e075231f5aef7d17c544d1bb5e1a606e76",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_0/ar_/test_artificial_128_Anscombe_PolyTrend_0__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
class Solution(object):
def insert(self, intervals, newInterval):
"""
O(n)
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
if not newInterval:
return intervals
if not intervals:
return [newInterval]
res = []
for i in range(len(intervals)):
if intervals[i].end < newInterval.start:
res.append(intervals[i])
elif intervals[i].start > newInterval.end:
res.append(newInterval)
res += intervals[i:]
return res
else:
newInterval.start = min(newInterval.start, intervals[i].start)
newInterval.end = max(newInterval.end, intervals[i].end)
        # Loop finished without returning: newInterval has absorbed (or lies
        # beyond) all remaining intervals, so append the merged result here.
res.append(newInterval)
return res
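# A minimal self-check sketch: LeetCode normally supplies the ``Interval``
# class, so a small stand-in is defined here purely for illustration.
if __name__ == "__main__":
    class Interval(object):
        def __init__(self, s=0, e=0):
            self.start = s
            self.end = e
    merged = Solution().insert([Interval(1, 3), Interval(6, 9)], Interval(2, 5))
    print([(iv.start, iv.end) for iv in merged])  # [(1, 5), (6, 9)]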
|
{
"content_hash": "a2878da1a9b732a52bf285b129ec0ef6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.528604118993135,
"repo_name": "youhusky/Facebook_Prepare",
"id": "da23d08b4790e8285a63c4f845be41b467f2cde9",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "057. Insert Interval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187109"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csvt05.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "15088394ef8cb19e99e7d549866530b2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "ysh329/django-test",
"id": "a298fcc43ccf1d0b4b98e5f7b108203d9579b2e6",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db10_admin/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59770"
},
{
"name": "HTML",
"bytes": "51133"
},
{
"name": "JavaScript",
"bytes": "129858"
},
{
"name": "Python",
"bytes": "99861"
},
{
"name": "Shell",
"bytes": "2135"
}
],
"symlink_target": ""
}
|
class DataPath(object):
def __init__(self, *path):
self._path = path
def __add__(self, other):
dp = DataPath()
dp._path = self._path + other._path
return dp
def __str__(self):
return '.'.join(map(str, (self._path)))
def __repr__(self):
return 'DataPath({})'.format(repr(self._path))
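# Illustrative only (not part of the library): paths combine with ``+`` and
# ``str`` renders them dot-separated, e.g.
#     str(DataPath('users', 0) + DataPath('name')) == 'users.0.name'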
|
{
"content_hash": "4f7c304b285ddbe3eefc38051ca44bfd",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 23.4,
"alnum_prop": 0.50997150997151,
"repo_name": "23andMe/Yamale",
"id": "1f6b6d8987f9f7669b3a4981c359e6b8035f4741",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamale/schema/datapath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "319"
},
{
"name": "Python",
"bytes": "70433"
}
],
"symlink_target": ""
}
|
from tower_cli import models
from tests.compat import unittest, mock
class FieldTests(unittest.TestCase):
"""A set of tests to establish that the base Field class works in the
way we expect.
"""
def test_dunder_lt(self):
"""Establish that the `__lt__` comparison method on fields works
as expected.
"""
f1 = models.Field()
f2 = models.Field()
self.assertTrue(f1 < f2)
def test_dunder_gt(self):
"""Establish that the `__gt__` comparison method on fields works
in the way we expect.
"""
f1 = models.Field()
f2 = models.Field()
self.assertTrue(f2 > f1)
def test_help_property_explicit(self):
"""Establish that an explicitly provided help text is preserved
as the field's help.
"""
f1 = models.Field(help_text='foo bar baz')
self.assertEqual(f1.help, 'foo bar baz')
def test_help_property_implicit(self):
"""Establish that a sane implicit help text is provided if none is
specified.
"""
f1 = models.Field()
f1.name = 'f1'
self.assertEqual(f1.help, 'The f1 field.')
def test_flags_standard(self):
"""Establish that the `flags` property returns what I expect for a
run-of-the-mill field.
"""
f1 = models.Field()
self.assertEqual(f1.flags, ['str'])
def test_flags_unique_unfilterable(self):
"""Establish that the `flags` property successfully flags unfilterable
and unique flags.
"""
f1 = models.Field(unique=True, filterable=False)
self.assertIn('unique', f1.flags)
self.assertIn('not filterable', f1.flags)
def test_flags_read_only(self):
"""Establish that the `flags` property successfully flags read-only
flags.
"""
f = models.Field(read_only=True)
self.assertEqual(f.flags, ['str', 'read-only'])
def test_flags_not_required(self):
"""Establish that the `flags` property successfully flags a
not-required field.
"""
f = models.Field(type=int, required=False)
self.assertEqual(f.flags, ['int', 'not required'])
def test_flags_type(self):
"""Establish that the flags property successfully shows the correct
type name.
"""
f = models.Field(type=bool)
self.assertEqual(f.flags, ['bool'])
|
{
"content_hash": "b920f93cb2ce2a2295da54d19edcaee9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 32.54666666666667,
"alnum_prop": 0.5964768537484637,
"repo_name": "tomfotherby/tower-cli",
"id": "e0d1f8d7ae247c98a5d287c5c36859f398906465",
"size": "3064",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_models_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268268"
}
],
"symlink_target": ""
}
|
import glob
import logging
import os.path
import pytest
import numpy as np
from metpy.io.nexrad import Level2File, Level3File, is_precip_mode
from metpy.cbook import get_test_data
# Turn off the warnings for tests
logging.getLogger("metpy.io.nexrad").setLevel(logging.CRITICAL)
#
# NEXRAD Level 2 Tests
#
# 1999 file tests old message 1
# KFTG tests bzip compression and newer format for a part of message 31
# KTLX 2015 has missing segments for message 18, which was causing exception
level2_files = ['KTLX20130520_201643_V06.gz', 'KTLX19990503_235621.gz',
'Level2_KFTG_20150430_1419.ar2v', 'KTLX20150530_000802_V06.bz2']
@pytest.mark.parametrize('fname', level2_files)
def test_level2(fname):
'Test reading NEXRAD level 2 files from the filename'
Level2File(get_test_data(fname, as_file_obj=False))
def test_level2_fobj():
'Test reading NEXRAD level2 data from a file object'
Level2File(get_test_data('Level2_KFTG_20150430_1419.ar2v'))
#
# NIDS/Level 3 Tests
#
nexrad_nids_files = glob.glob(os.path.join(get_test_data('nids', as_file_obj=False), 'K???_*'))
@pytest.mark.parametrize('fname', nexrad_nids_files)
def test_level3_files(fname):
'Test opening a NEXRAD NIDS file'
Level3File(fname)
tdwr_nids_files = glob.glob(os.path.join(get_test_data('nids', as_file_obj=False),
'Level3_MCI_*'))
@pytest.mark.parametrize('fname', tdwr_nids_files)
def test_tdwr_nids(fname):
'Test opening a TDWR NIDS file'
Level3File(fname)
def test_basic():
'Basic test of reading one specific NEXRAD NIDS file based on the filename'
Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids', as_file_obj=False))
def test_tdwr():
'Test reading a specific TDWR file'
f = Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))
assert f.prod_desc.prod_code == 182
def test_nwstg():
'Test reading a nids file pulled from the NWSTG'
Level3File(get_test_data('nids/sn.last', as_file_obj=False))
def test_fobj():
'Test reading a specific NEXRAD NIDS files from a file object'
Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids'))
def test21_precip():
'Test checking whether VCP 21 is precipitation mode'
assert is_precip_mode(21), 'VCP 21 is precip'
def test11_precip():
'Test checking whether VCP 11 is precipitation mode'
assert is_precip_mode(11), 'VCP 11 is precip'
def test31_clear_air():
'Test checking whether VCP 31 is clear air mode'
assert not is_precip_mode(31), 'VCP 31 is not precip'
def test_msg15():
'Check proper decoding of message type 15'
f = Level2File(get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False))
data = f.clutter_filter_map['data']
assert isinstance(data[0][0], list)
def test_tracks():
'Check that tracks are properly decoded'
f = Level3File(get_test_data('nids/KOUN_SDUS34_NSTTLX_201305202016'))
for data in f.sym_block[0]:
if 'track' in data:
x, y = np.array(data['track']).T
assert len(x)
assert len(y)
def test_vector_packet():
'Check that vector packets are properly decoded'
f = Level3File(get_test_data('nids/KOUN_SDUS64_NHITLX_201305202016'))
for page in f.graph_pages:
for item in page:
if 'vectors' in item:
x1, x2, y1, y2 = np.array(item['vectors']).T
assert len(x1)
assert len(x2)
assert len(y1)
assert len(y2)
|
{
"content_hash": "9f0429af9e89040193ea03654d5965e9",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 95,
"avg_line_length": 29.065573770491802,
"alnum_prop": 0.6731528482797519,
"repo_name": "deeplycloudy/MetPy",
"id": "1c90dede1d122477429204c03f58f95cac458022",
"size": "3690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metpy/io/tests/test_nexrad.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "989941"
},
{
"name": "Python",
"bytes": "473100"
}
],
"symlink_target": ""
}
|
"""The tests for the denonavr media player platform."""
from unittest.mock import patch
import pytest
from homeassistant.components import media_player
from homeassistant.components.denonavr.config_flow import (
CONF_MANUFACTURER,
CONF_SERIAL_NUMBER,
CONF_TYPE,
DOMAIN,
)
from homeassistant.components.denonavr.media_player import (
ATTR_COMMAND,
ATTR_DYNAMIC_EQ,
SERVICE_GET_COMMAND,
SERVICE_SET_DYNAMIC_EQ,
SERVICE_UPDATE_AUDYSSEY,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_MODEL
from tests.common import MockConfigEntry
TEST_HOST = "1.2.3.4"
TEST_NAME = "Test_Receiver"
TEST_MODEL = "model5"
TEST_SERIALNUMBER = "123456789"
TEST_MANUFACTURER = "Denon"
TEST_RECEIVER_TYPE = "avr-x"
TEST_ZONE = "Main"
TEST_UNIQUE_ID = f"{TEST_MODEL}-{TEST_SERIALNUMBER}"
TEST_TIMEOUT = 2
TEST_SHOW_ALL_SOURCES = False
TEST_ZONE2 = False
TEST_ZONE3 = False
ENTITY_ID = f"{media_player.DOMAIN}.{TEST_NAME}"
@pytest.fixture(name="client")
def client_fixture():
"""Patch of client library for tests."""
with patch(
"homeassistant.components.denonavr.receiver.DenonAVR",
autospec=True,
) as mock_client_class, patch(
"homeassistant.components.denonavr.config_flow.denonavr.async_discover"
):
mock_client_class.return_value.name = TEST_NAME
mock_client_class.return_value.model_name = TEST_MODEL
mock_client_class.return_value.serial_number = TEST_SERIALNUMBER
mock_client_class.return_value.manufacturer = TEST_MANUFACTURER
mock_client_class.return_value.receiver_type = TEST_RECEIVER_TYPE
mock_client_class.return_value.zone = TEST_ZONE
mock_client_class.return_value.input_func_list = []
mock_client_class.return_value.sound_mode_list = []
mock_client_class.return_value.zones = {"Main": mock_client_class.return_value}
yield mock_client_class.return_value
async def setup_denonavr(hass):
"""Initialize media_player for tests."""
entry_data = {
CONF_HOST: TEST_HOST,
CONF_MODEL: TEST_MODEL,
CONF_TYPE: TEST_RECEIVER_TYPE,
CONF_MANUFACTURER: TEST_MANUFACTURER,
CONF_SERIAL_NUMBER: TEST_SERIALNUMBER,
}
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_UNIQUE_ID,
data=entry_data,
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state
assert state.name == TEST_NAME
async def test_get_command(hass, client):
"""Test generic command functionality."""
await setup_denonavr(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test_command",
}
await hass.services.async_call(DOMAIN, SERVICE_GET_COMMAND, data)
await hass.async_block_till_done()
client.async_get_command.assert_awaited_with("test_command")
async def test_dynamic_eq(hass, client):
"""Test that dynamic eq method works."""
await setup_denonavr(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_DYNAMIC_EQ: True,
}
# Verify on call
await hass.services.async_call(DOMAIN, SERVICE_SET_DYNAMIC_EQ, data)
await hass.async_block_till_done()
# Verify off call
data[ATTR_DYNAMIC_EQ] = False
await hass.services.async_call(DOMAIN, SERVICE_SET_DYNAMIC_EQ, data)
await hass.async_block_till_done()
client.async_dynamic_eq_on.assert_called_once()
client.async_dynamic_eq_off.assert_called_once()
async def test_update_audyssey(hass, client):
"""Test that dynamic eq method works."""
await setup_denonavr(hass)
# Verify call
await hass.services.async_call(
DOMAIN,
SERVICE_UPDATE_AUDYSSEY,
{
ATTR_ENTITY_ID: ENTITY_ID,
},
)
await hass.async_block_till_done()
client.async_update_audyssey.assert_called_once()
|
{
"content_hash": "a90f6d06689b37ead05fd57dba1922d7",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 87,
"avg_line_length": 29.16176470588235,
"alnum_prop": 0.6797781139687342,
"repo_name": "nkgilley/home-assistant",
"id": "4497025c11cebce444dbd4aed326e905f25900e0",
"size": "3966",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/denonavr/test_media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
'''A full-screen minute:second timer. Leave it in charge of your conference
lighting talks.
After 5 minutes, the timer goes red. This limit is easily adjustable by
hacking the source code.
Press spacebar to start, stop and reset the timer.
'''
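# As noted above, the limit is hard-coded: it is the "if m >= 5" check in
# Timer.update below, so a ten-minute limit is simply "if m >= 10".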
import pyglet
window = pyglet.window.Window(fullscreen=True)
class Timer(object):
def __init__(self):
self.label = pyglet.text.Label('00:00', font_size=360,
x=window.width//2, y=window.height//2,
anchor_x='center', anchor_y='center')
self.reset()
def reset(self):
self.time = 0
self.running = False
self.label.text = '00:00'
self.label.color = (255, 255, 255, 255)
def update(self, dt):
if self.running:
self.time += dt
m, s = divmod(self.time, 60)
self.label.text = '%02d:%02d' % (m, s)
if m >= 5:
self.label.color = (180, 0, 0, 255)
@window.event
def on_key_press(symbol, modifiers):
if symbol == pyglet.window.key.SPACE:
if timer.running:
timer.running = False
else:
if timer.time > 0:
timer.reset()
else:
timer.running = True
elif symbol == pyglet.window.key.ESCAPE:
window.close()
@window.event
def on_draw():
window.clear()
timer.label.draw()
timer = Timer()
pyglet.clock.schedule_interval(timer.update, 1/30.0)
pyglet.app.run()
|
{
"content_hash": "1a290a56776037e361b7adb66c4e9686",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 26.982142857142858,
"alnum_prop": 0.5592322964923891,
"repo_name": "nicememory/pie",
"id": "af9a7be5f9626d8db1af044da7badce5e947880c",
"size": "3252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet/examples/timer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5318"
},
{
"name": "C",
"bytes": "6624"
},
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "Makefile",
"bytes": "5773"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9377528"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Vim script",
"bytes": "2952"
}
],
"symlink_target": ""
}
|
'''
Implements the cuda module as called from within an executing kernel
(@cuda.jit-decorated function).
'''
from contextlib import contextmanager
import sys
import threading
import traceback
import numpy as np
from numba.np import numpy_support
class Dim3(object):
'''
Used to implement thread/block indices/dimensions
'''
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return '(%s, %s, %s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Dim3(%s, %s, %s)' % (self.x, self.y, self.z)
def __iter__(self):
yield self.x
yield self.y
yield self.z
class GridGroup:
'''
Used to implement the grid group.
'''
def sync(self):
# Synchronization of the grid group is equivalent to synchronization of
# the thread block, because we only support cooperative grids with one
# block.
threading.current_thread().syncthreads()
class FakeCUDACg:
'''
CUDA Cooperative Groups
'''
def this_grid(self):
return GridGroup()
class FakeCUDALocal(object):
'''
CUDA Local arrays
'''
def array(self, shape, dtype):
dtype = numpy_support.as_dtype(dtype)
return np.empty(shape, dtype)
class FakeCUDAConst(object):
'''
CUDA Const arrays
'''
def array_like(self, ary):
return ary
class FakeCUDAShared(object):
'''
CUDA Shared arrays.
Limitations: assumes that only one call to cuda.shared.array is on a line,
and that that line is only executed once per thread. i.e.::
a = cuda.shared.array(...); b = cuda.shared.array(...)
will erroneously alias a and b, and::
for i in range(10):
sharedarrs[i] = cuda.shared.array(...)
will alias all arrays created at that point (though it is not certain that
this would be supported by Numba anyway).
'''
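    # Illustrative sketch of the dynamic shared memory path (assumes callers
    # pass a Numba dtype such as ``numba.types.float32``, which is how
    # ``cuda.shared.array`` reaches this class):
    #
    #     shared = FakeCUDAShared(dynshared_size=64)
    #     buf = shared.array(0, types.float32)  # dynamic request (shape 0)
    #     assert buf.shape == (16,)             # 64 bytes // 4 bytes/element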
def __init__(self, dynshared_size):
self._allocations = {}
self._dynshared_size = dynshared_size
self._dynshared = np.zeros(dynshared_size, dtype=np.byte)
def array(self, shape, dtype):
dtype = numpy_support.as_dtype(dtype)
# Dynamic shared memory is requested with size 0 - this all shares the
# same underlying memory
if shape == 0:
# Count must be the maximum number of whole elements that fit in the
# buffer (Numpy complains if the buffer is not a multiple of the
# element size)
count = self._dynshared_size // dtype.itemsize
return np.frombuffer(self._dynshared.data, dtype=dtype, count=count)
# Otherwise, identify allocations by source file and line number
# We pass the reference frame explicitly to work around
# http://bugs.python.org/issue25108
stack = traceback.extract_stack(sys._getframe())
caller = stack[-2][0:2]
res = self._allocations.get(caller)
if res is None:
res = np.empty(shape, dtype)
self._allocations[caller] = res
return res
addlock = threading.Lock()
sublock = threading.Lock()
andlock = threading.Lock()
orlock = threading.Lock()
xorlock = threading.Lock()
maxlock = threading.Lock()
minlock = threading.Lock()
caslock = threading.Lock()
inclock = threading.Lock()
declock = threading.Lock()
exchlock = threading.Lock()
class FakeCUDAAtomic(object):
def add(self, array, index, val):
with addlock:
old = array[index]
array[index] += val
return old
def sub(self, array, index, val):
with sublock:
old = array[index]
array[index] -= val
return old
def and_(self, array, index, val):
with andlock:
old = array[index]
array[index] &= val
return old
def or_(self, array, index, val):
with orlock:
old = array[index]
array[index] |= val
return old
def xor(self, array, index, val):
with xorlock:
old = array[index]
array[index] ^= val
return old
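    # ``inc``/``dec`` mirror CUDA's atomicInc/atomicDec: ``inc`` wraps the
    # stored value back to 0 once it reaches ``val``, and ``dec`` reloads
    # ``val`` whenever the stored value is 0 or greater than ``val``.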
def inc(self, array, index, val):
with inclock:
old = array[index]
if old >= val:
array[index] = 0
else:
array[index] += 1
return old
def dec(self, array, index, val):
with declock:
old = array[index]
if (old == 0) or (old > val):
array[index] = val
else:
array[index] -= 1
return old
def exch(self, array, index, val):
with exchlock:
old = array[index]
array[index] = val
return old
def max(self, array, index, val):
with maxlock:
old = array[index]
array[index] = max(old, val)
return old
def min(self, array, index, val):
with minlock:
old = array[index]
array[index] = min(old, val)
return old
def nanmax(self, array, index, val):
with maxlock:
old = array[index]
array[index] = np.nanmax([array[index], val])
return old
def nanmin(self, array, index, val):
with minlock:
old = array[index]
array[index] = np.nanmin([array[index], val])
return old
def compare_and_swap(self, array, old, val):
with caslock:
index = (0,) * array.ndim
loaded = array[index]
if loaded == old:
array[index] = val
return loaded
class FakeCUDAFp16(object):
def hadd(self, a, b):
return a + b
def hsub(self, a, b):
return a - b
def hmul(self, a, b):
return a * b
def hfma(self, a, b, c):
return a * b + c
def hneg(self, a):
return -a
def habs(self, a):
return abs(a)
class FakeCUDAModule(object):
'''
An instance of this class will be injected into the __globals__ for an
executing function in order to implement calls to cuda.*. This will fail to
work correctly if the user code does::
from numba import cuda as something_else
In other words, the CUDA module must be called cuda.
'''
def __init__(self, grid_dim, block_dim, dynshared_size):
self.gridDim = Dim3(*grid_dim)
self.blockDim = Dim3(*block_dim)
self._cg = FakeCUDACg()
self._local = FakeCUDALocal()
self._shared = FakeCUDAShared(dynshared_size)
self._const = FakeCUDAConst()
self._atomic = FakeCUDAAtomic()
self._fp16 = FakeCUDAFp16()
@property
def cg(self):
return self._cg
@property
def local(self):
return self._local
@property
def shared(self):
return self._shared
@property
def const(self):
return self._const
@property
def atomic(self):
return self._atomic
@property
def fp16(self):
return self._fp16
@property
def threadIdx(self):
return threading.current_thread().threadIdx
@property
def blockIdx(self):
return threading.current_thread().blockIdx
@property
def warpsize(self):
return 32
@property
def laneid(self):
return threading.current_thread().thread_id % 32
def syncthreads(self):
threading.current_thread().syncthreads()
def threadfence(self):
# No-op
pass
def threadfence_block(self):
# No-op
pass
def threadfence_system(self):
# No-op
pass
def syncthreads_count(self, val):
return threading.current_thread().syncthreads_count(val)
def syncthreads_and(self, val):
return threading.current_thread().syncthreads_and(val)
def syncthreads_or(self, val):
return threading.current_thread().syncthreads_or(val)
def popc(self, val):
return bin(val).count("1")
def fma(self, a, b, c):
return a * b + c
def cbrt(self, a):
return a ** (1 / 3)
def brev(self, val):
return int('{:032b}'.format(val)[::-1], 2)
def clz(self, val):
s = '{:032b}'.format(val)
return len(s) - len(s.lstrip('0'))
def ffs(self, val):
# The algorithm is:
# 1. Count the number of trailing zeros.
# 2. Add 1, because the LSB is numbered 1 rather than 0, and so on.
# 3. If we've counted 32 zeros (resulting in 33), there were no bits
# set so we need to return zero.
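        # Worked example: val = 0b1010000 has four trailing zeros, so ffs
        # returns 5; val = 0 has 32 trailing zeros, and (32 + 1) % 33 == 0.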
s = '{:032b}'.format(val)
r = (len(s) - len(s.rstrip('0')) + 1) % 33
return r
def selp(self, a, b, c):
return b if a else c
def grid(self, n):
bdim = self.blockDim
bid = self.blockIdx
tid = self.threadIdx
x = bid.x * bdim.x + tid.x
if n == 1:
return x
y = bid.y * bdim.y + tid.y
if n == 2:
return (x, y)
z = bid.z * bdim.z + tid.z
if n == 3:
return (x, y, z)
raise RuntimeError("Global ID has 1-3 dimensions. %d requested" % n)
def gridsize(self, n):
bdim = self.blockDim
gdim = self.gridDim
x = bdim.x * gdim.x
if n == 1:
return x
y = bdim.y * gdim.y
if n == 2:
return (x, y)
z = bdim.z * gdim.z
if n == 3:
return (x, y, z)
raise RuntimeError("Global grid has 1-3 dimensions. %d requested" % n)
@contextmanager
def swapped_cuda_module(fn, fake_cuda_module):
from numba import cuda
fn_globs = fn.__globals__
# get all globals that is the "cuda" module
orig = dict((k, v) for k, v in fn_globs.items() if v is cuda)
# build replacement dict
repl = dict((k, fake_cuda_module) for k, v in orig.items())
# replace
fn_globs.update(repl)
try:
yield
finally:
# revert
fn_globs.update(orig)
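# Illustrative usage (``my_kernel`` is a hypothetical simulator kernel): the
# kernel's view of ``cuda`` is swapped only for the duration of the call.
#
#     fake = FakeCUDAModule(grid_dim=(1, 1, 1), block_dim=(1, 1, 1),
#                           dynshared_size=0)
#     with swapped_cuda_module(my_kernel, fake):
#         my_kernel(args)  # references to ``cuda`` resolve to ``fake``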
|
{
"content_hash": "95559202389b3f894a8dc6a262f72e3c",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 80,
"avg_line_length": 24.997512437810947,
"alnum_prop": 0.559458652602249,
"repo_name": "IntelLabs/numba",
"id": "7455b7cd51811d36b1eea9aa59411749f8c5bef8",
"size": "10049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/cuda/simulator/kernelapi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6984"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8764393"
},
{
"name": "Shell",
"bytes": "13542"
}
],
"symlink_target": ""
}
|
"""Provides various scoring methods for word strength."""
import re
import fuzzy
dmeta = fuzzy.DMetaphone()
soundex = fuzzy.Soundex(4)
def score_dmetaphone(words):
"""Score words using the double metaphone algorithm.
:param words (list): the list of words.
:rtype scores (list): the scored words
"""
scores = []
for word in words:
res, output = dmeta(word)
scores.append('{0}:{1}:{2}'.format(word, res, output))
return scores
def score_soundex(words):
"""Score words using the soundex algorithm.
:param words (list): the list of words.
:rtype scores (list): the scored words
"""
return ['{}: {}'.format(w.lower(), soundex(w)) for w in words]
def score_nysiis(words):
"""Score words using the nysiis algorithm.
:param words (list): the list of words.
:rtype scores (list): the scored words
"""
return ['{}: {}'.format(w.lower(), fuzzy.nysiis(w)) for w in words]
def score_length(word):
"""Return a score, 1-5, of the length of the word.
Really long, or really short words get a lower score.
There is no hard science, but popular opinion suggests
that a word somewhere between 8-15 letters is optimal.
:param word (str): The word to score.
:rtype score (int): The resulting score.
"""
if not word or len(word) == 0:
return 0
_len = len(word)
# 20+
if _len > 20:
return 1
# 15-20
elif _len > 15 and _len <= 20:
return 2
# 1-4
elif _len <= 4:
return 3
# 10-15
elif _len >= 10 and _len <= 15:
return 4
# 5-10
elif _len > 4 and _len < 10:
return 5
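# In summary, the bands above grade as: 1-4 letters -> 3, 5-9 -> 5,
# 10-15 -> 4, 16-20 -> 2, and anything over 20 -> 1.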
def bounded(num, start, end):
"""Determine if a number is within the bounds of `start` and `end`.
:param num (int): An integer.
:param start (int): A start minimum.
:param end (int): An end maximum.
:rtype is_bounded (bool): Whether number is bounded by start and end.
"""
return num >= start and num <= end
def score_pronounceability(word):
"""Get the ratio of vowels to consonants, a very basic measurement.
Half vowels and half consonants indicates a highly pronounceable word.
For example, 0.5 / 0.5 = 1.0, so one is perfect, and lower is worse.
The 1-5 scale translation:
0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0
     0   1   2   3   4   5   4   3   2   1   5
:param word (string): The name
:rtype (int): The final pronounceability score
"""
if not word or len(word) == 0:
return 0
word = re.sub(r'[^a-zA-Z0-9]', '', word)
re_vowels = re.compile(r'[a|e|i|o|u]')
re_cons = re.compile(r'[^a|e|i|o|u]')
vowels = float(len(re.findall(re_vowels, word)))
consonants = float(len(re.findall(re_cons, word)))
    if vowels == 0.0 or consonants == 0.0:
return 0
if vowels < consonants:
ratio = vowels / consonants
else:
ratio = consonants / vowels
if ratio == 0.0:
return 0
if ratio == 1.0:
return 5
if bounded(ratio, 0.0, 0.1) or bounded(ratio, 0.9, 1.0):
return 1
if bounded(ratio, 0.1, 0.2) or bounded(ratio, 0.8, 0.9):
return 2
if bounded(ratio, 0.2, 0.3) or bounded(ratio, 0.7, 0.8):
return 3
if bounded(ratio, 0.3, 0.4) or bounded(ratio, 0.6, 0.7):
return 4
if bounded(ratio, 0.4, 0.5) or bounded(ratio, 0.5, 0.6):
return 5
return 0
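# Worked example: 'gate' has two vowels and two consonants, so the ratio is
# 1.0 and the score is 5; 'strengths' has one vowel against eight consonants
# (ratio 0.125), which falls in the 0.1-0.2 band and scores 2.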
def score_simplicity(word):
"""Determine how simple the word is.
Simple is defined as the number of separate words.
In this case, higher is better, indicating a better score.
:param word (string): the name
:rtype score (int): the final simplicity score
    >>> score_simplicity('the cat in the hat')
    1
    >>> score_simplicity('facebook')
    5
"""
if not word or len(word) == 0:
return 0
word_count = len(re.split(r'[^a-z]', word))
if word_count == 1:
return 5
if word_count < 3:
return 4
if word_count < 4:
return 3
if word_count < 5:
return 2
# After 4+ words, the name has a very poor score.
return 1
def score_name_overall(word):
"""Score the name using separate scoring functions, then normalize to 100.
This method gives an overall intuitive score.
The closer to 100%, the better.
:param word (string): the name
:rtype score (float): the final name score
"""
length = score_length(word)
pronounceability = score_pronounceability(word)
simplicity = score_simplicity(word)
_scores = sum([length, pronounceability, simplicity])
score = round(_scores * 10)
# cut off at 100%
if score > 100:
return 100
return score
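# Worked example: a name grading 4 (length) + 5 (pronounceability) + 5
# (simplicity) sums to 14, and 14 * 10 = 140 is capped to 100; a weaker
# 2 + 2 + 1 = 5 yields 50.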
def score_names_overall(words):
"""Score all names.
:param words (list): the list of words.
:rtype words (list): a list of tuples, with the score and word.
"""
return [(score_name_overall(w), w) for w in words]
def generate_all_scoring(words):
"""Return all scoring methods for a set of words.
:param words (list): the list of words.
:rtype words (dict): the scores, keyed by scoring name.
"""
return {
'dmetaphone': score_dmetaphone(words),
'soundex': score_soundex(words),
'nysiis': score_nysiis(words),
'grade': score_names_overall(words)
}
|
{
"content_hash": "b39785cad2774d98881112bdfc508633",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 78,
"avg_line_length": 27.5,
"alnum_prop": 0.5970315398886827,
"repo_name": "christabor/namebot",
"id": "183712d351a97950557b190145f0ad6e1383f54b",
"size": "5390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "namebot/scoring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "684"
},
{
"name": "Python",
"bytes": "306650"
}
],
"symlink_target": ""
}
|
import csv
k = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware',
'District of Columbia', 'Florida', 'Georgia', 'Hawaii', 'Idaho', 'Illinois', 'Indiana', 'Iowa',
'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', 'Michigan', 'Minnesota',
'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', 'New Mexico',
'New York', 'North Carolina', 'North Dakota', 'Ohio', 'Oklahoma', 'Oregon', 'Pennsylvania',
'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont',
'Virginia', 'Washington', 'West Virginia','Wisconsin', 'Wyoming'
]
years = ['2016','2012', '2008', '2004', '2000']
for year in years:
csv_file = year + ".csv"
dataFile = open(csv_file, "r")
dataFile.readline()
c = 0
write_all = [['State', 'Abbreviation','Democratic','Republican']]
for line in dataFile.readlines():
data = line.split(",")
new_data = [ data[0], data[1], data[2][:-2] ]
state = [k[c]]
state.extend(new_data)
write_all.append( state)
c += 1
with open( year+".csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(write_all)
write_all = []
#
|
{
"content_hash": "86934860ca8abea549d5e8538aa29af8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 104,
"avg_line_length": 30.512820512820515,
"alnum_prop": 0.6310924369747899,
"repo_name": "ub-cse442/election-and-data-sci",
"id": "dc5315ef38181733c39fc8c1b07641053a4bf987",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addStates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "502300"
},
{
"name": "Python",
"bytes": "42827"
},
{
"name": "R",
"bytes": "12027"
}
],
"symlink_target": ""
}
|
from measures.generic.Overhead import Overhead as Overhead
from measures.multicast.SentRemoteMulticastYYYMessages import SentRemoteMulticastYYYMessages as SentRemoteMulticastYYYMessages
class GraphsearchOverhead(Overhead):
def __init__(self, period, simulationTime):
Overhead.__init__(self, period, simulationTime)
self.addMeasure(SentRemoteMulticastYYYMessages(period, simulationTime, 'graphsearch.forward.message.FCompositionMessage'))
self.addMeasure(SentRemoteMulticastYYYMessages(period, simulationTime, 'graphsearch.backward.message.BCompositionMessage'))
self.addMeasure(SentRemoteMulticastYYYMessages(period, simulationTime, 'graphsearch.bidirectionalsearch.message.CompositionNotificationMessage'))
|
{
"content_hash": "d43c4c762088aff543d2e5a048422e92",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 147,
"avg_line_length": 49.2,
"alnum_prop": 0.8360433604336044,
"repo_name": "unaguil/hyperion-ns2",
"id": "70010e98c83a97335a174b6178b1089a1797a141",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/measures/graphsearch/GraphsearchOverhead.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from .base import (
Base64, NotEquivalentError, UndeliverableWarning, BrokerState,
QoS, Message, AbstractChannel, Channel, Management, Transport,
Empty, binding_key_t, queue_binding_t,
)
__all__ = (
'Base64', 'NotEquivalentError', 'UndeliverableWarning', 'BrokerState',
'QoS', 'Message', 'AbstractChannel', 'Channel', 'Management', 'Transport',
'Empty', 'binding_key_t', 'queue_binding_t',
)
|
{
"content_hash": "102d4b54f4afc8df37fdc19a661dce03",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 36.53846153846154,
"alnum_prop": 0.6989473684210527,
"repo_name": "urbn/kombu",
"id": "e2ee0b5d9f5ae509b7ea2635d7c006911c1bccda",
"size": "475",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kombu/transport/virtual/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1844"
},
{
"name": "Makefile",
"bytes": "3788"
},
{
"name": "Python",
"bytes": "1082894"
}
],
"symlink_target": ""
}
|
"""
Tests for the text processor.
"""
from __future__ import unicode_literals
import json
from unittest import TestCase, main
from datetime import datetime
import warnings
from nose.tools import * # PEP8 asserts
from nose.plugins.attrib import attr
from textblob.compat import PY2, unicode, basestring, binary_type
import textblob as tb
from textblob.packages import nltk
from textblob.np_extractors import ConllExtractor, FastNPExtractor
from textblob.taggers import NLTKTagger, PatternTagger
from textblob.tokenizers import WordTokenizer, SentenceTokenizer
from textblob.sentiments import NaiveBayesAnalyzer, PatternAnalyzer
from textblob.parsers import PatternParser
from textblob.classifiers import NaiveBayesClassifier
import textblob.wordnet as wn
Synset = nltk.corpus.reader.Synset
train = [
('I love this sandwich.', 'pos'),
('This is an amazing place!', 'pos'),
("What a truly amazing dinner.", 'pos'),
('I feel very good about these beers.', 'pos'),
('This is my best work.', 'pos'),
("What an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('He is my sworn enemy!', 'neg'),
('My boss is horrible.', 'neg')
]
test = [
('The beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feeling dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')
]
classifier = NaiveBayesClassifier(train)
class WordListTest(TestCase):
def setUp(self):
self.words = 'Beautiful is better than ugly'.split()
self.mixed = ['dog', 'dogs', 'blob', 'Blobs', 'text']
def test_len(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
assert_equal(len(wl), 3)
def test_slicing(self):
wl = tb.WordList(self.words)
first = wl[0]
assert_true(isinstance(first, tb.Word))
assert_equal(first, 'Beautiful')
dogs = wl[0:2]
assert_true(isinstance(dogs, tb.WordList))
assert_equal(dogs, tb.WordList(['Beautiful', 'is']))
def test_repr(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
if PY2:
assert_equal(repr(wl), "WordList([u'Beautiful', u'is', u'better'])")
else:
assert_equal(repr(wl), "WordList(['Beautiful', 'is', 'better'])")
def test_slice_repr(self):
wl = tb.WordList(['Beautiful', 'is', 'better'])
if PY2:
assert_equal(repr(wl[:2]), "WordList([u'Beautiful', u'is'])")
else:
assert_equal(repr(wl[:2]), "WordList(['Beautiful', 'is'])")
def test_str(self):
wl = tb.WordList(self.words)
assert_equal(str(wl), str(self.words))
def test_singularize(self):
wl = tb.WordList(['dogs', 'cats', 'buffaloes', 'men', 'mice'])
assert_equal(wl.singularize(), tb.WordList(['dog', 'cat', 'buffalo', 'man', 'mouse'
]))
def test_pluralize(self):
wl = tb.WordList(['dog', 'cat', 'buffalo'])
assert_equal(wl.pluralize(), tb.WordList(['dogs', 'cats', 'buffaloes']))
@attr('slow')
def test_lemmatize(self):
wl = tb.WordList(["cat", "dogs", "oxen"])
assert_equal(wl.lemmatize(), tb.WordList(['cat', 'dog', 'ox']))
def test_upper(self):
wl = tb.WordList(self.words)
assert_equal(wl.upper(), tb.WordList([w.upper() for w in self.words]))
def test_lower(self):
wl = tb.WordList(['Zen', 'oF', 'PYTHON'])
assert_equal(wl.lower(), tb.WordList(['zen', 'of', 'python']))
def test_count(self):
wl = tb.WordList(['monty', 'python', 'Python', 'Monty'])
assert_equal(wl.count('monty'), 2)
assert_equal(wl.count('monty', case_sensitive=True), 1)
assert_equal(wl.count('mon'), 0)
def test_convert_to_list(self):
wl = tb.WordList(self.words)
assert_equal(list(wl), self.words)
def test_append(self):
wl = tb.WordList(['dog'])
wl.append("cat")
assert_true(isinstance(wl[1], tb.Word))
wl.append(('a', 'tuple'))
assert_true(isinstance(wl[2], tuple))
def test_extend(self):
wl = tb.WordList(["cats", "dogs"])
wl.extend(["buffalo", 4])
assert_true(isinstance(wl[2], tb.Word))
assert_true(isinstance(wl[3], int))
class SentenceTest(TestCase):
def setUp(self):
self.raw_sentence = \
'Any place with frites and Belgian beer has my vote.'
self.sentence = tb.Sentence(self.raw_sentence)
def test_repr(self):
# In Py2, repr returns bytestring
if PY2:
assert_equal(repr(self.sentence),
b"Sentence(\"{0}\")".format(binary_type(self.raw_sentence)))
# In Py3, returns text type string
else:
assert_equal(repr(self.sentence), 'Sentence("{0}")'.format(self.raw_sentence))
def test_stripped_sentence(self):
assert_equal(self.sentence.stripped,
'any place with frites and belgian beer has my vote')
def test_len(self):
assert_equal(len(self.sentence), len(self.raw_sentence))
@attr('slow')
def test_dict(self):
sentence_dict = self.sentence.dict
assert_equal(sentence_dict, {
'raw': self.raw_sentence,
'start_index': 0,
'polarity': 0.0,
'subjectivity': 0.0,
'end_index': len(self.raw_sentence) - 1,
'stripped': 'any place with frites and belgian beer has my vote',
'noun_phrases': self.sentence.noun_phrases,
})
def test_pos_tags(self):
then1 = datetime.now()
tagged = self.sentence.pos_tags
now1 = datetime.now()
t1 = now1 - then1
then2 = datetime.now()
tagged = self.sentence.pos_tags
now2 = datetime.now()
t2 = now2 - then2
# Getting the pos tags the second time should be faster
# because they were stored as an attribute the first time
assert_true(t2 < t1)
assert_equal(tagged,
[('Any', 'DT'), ('place', 'NN'), ('with', 'IN'),
('frites', 'NNS'), ('and', 'CC'), ('Belgian', 'JJ'),
('beer', 'NN'), ('has', 'VBZ'), ('my', 'PRP$'),
('vote', 'NN')]
)
@attr('slow')
def test_noun_phrases(self):
nps = self.sentence.noun_phrases
assert_equal(nps, ['belgian beer'])
def test_words_are_word_objects(self):
words = self.sentence.words
assert_true(isinstance(words[0], tb.Word))
assert_equal(words[1].pluralize(), 'places')
def test_string_equality(self):
assert_equal(self.sentence, 'Any place with frites and Belgian beer has my vote.')
@attr("requires_internet")
def test_translate(self):
blob = tb.Sentence("This is a sentence.")
translated = blob.translate(to="es")
assert_true(isinstance(translated, tb.Sentence))
assert_equal(translated, "Esta es una frase.")
def test_correct(self):
blob = tb.Sentence("I havv bad speling.")
assert_true(isinstance(blob.correct(), tb.Sentence))
assert_equal(blob.correct(), tb.Sentence("I have bad spelling."))
blob = tb.Sentence("I havv \ngood speling.")
assert_true(isinstance(blob.correct(), tb.Sentence))
assert_equal(blob.correct(), tb.Sentence("I have \ngood spelling."))
@attr('requires_internet')
def test_translate_detects_language_by_default(self):
blob = tb.TextBlob(unicode("ذات سيادة كاملة"))
assert_equal(blob.translate(), "With full sovereignty")
class TextBlobTest(TestCase):
def setUp(self):
self.text = \
"""Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
self.blob = tb.TextBlob(self.text)
self.np_test_text = '''
Python is a widely used general-purpose, high-level programming language.
Its design philosophy emphasizes code readability, and its syntax allows
programmers to express concepts in fewer
lines of code than would be possible in languages such as C.
The language provides constructs intended to enable clear programs on both a small and large scale.
Python supports multiple programming paradigms, including object-oriented,
imperative and functional programming or procedural styles.
It features a dynamic type system and automatic memory management and
has a large and comprehensive standard library. Like other dynamic languages, Python is often used as a scripting language,
but is also used in a wide range of non-scripting contexts.
Using third-party tools, Python code can be packaged into standalone executable
programs. Python interpreters are available for many operating systems. CPython, the reference implementation of Python, is free and open source software and h
as a community-based development model, as do nearly all of its alternative implementations. CPython
is managed by the non-profit Python Software Foundation.'''
self.np_test_blob = tb.TextBlob(self.np_test_text)
self.short = "Beautiful is better than ugly. "
self.short_blob = tb.TextBlob(self.short)
def test_init(self):
blob = tb.TextBlob('Wow I love this place. It really rocks my socks!')
assert_equal(len(blob.sentences), 2)
assert_equal(blob.sentences[1].stripped, 'it really rocks my socks')
assert_equal(blob.string, blob.raw)
# Must initialize with a string
assert_raises(TypeError, tb.TextBlob.__init__, ['invalid'])
def test_string_equality(self):
blob = tb.TextBlob("Textblobs should be equal to strings.")
assert_equal(blob, "Textblobs should be equal to strings.")
def test_string_comparison(self):
blob = tb.TextBlob("apple")
assert_true(blob < "banana")
assert_true(blob > 'aardvark')
def test_hash(self):
blob = tb.TextBlob('apple')
assert_equal(hash(blob), hash('apple'))
assert_not_equal(hash(blob), hash('banana'))
def test_stripped(self):
blob = tb.TextBlob("Um... well this ain't right.!..")
assert_equal(blob.stripped, "um well this aint right")
def test_ngrams(self):
blob = tb.TextBlob("I am eating a pizza.")
three_grams = blob.ngrams()
assert_equal(three_grams, [
tb.WordList(('I', 'am', 'eating')),
tb.WordList(('am', 'eating', 'a')),
tb.WordList(('eating', 'a', 'pizza'))
])
four_grams = blob.ngrams(n=4)
assert_equal(four_grams, [
tb.WordList(('I', 'am', 'eating', 'a')),
tb.WordList(('am', 'eating', 'a', 'pizza'))
])
def test_clean_html(self):
html = '<b>Python</b> is a widely used <a href="/wiki/General-purpose_programming_language" title="General-purpose programming language">general-purpose</a>, <a href="/wiki/High-level_programming_language" title="High-level programming language">high-level programming language</a>.'
assert_raises(NotImplementedError, lambda: tb.TextBlob(html, clean_html=True))
def test_sentences(self):
blob = self.blob
assert_equal(len(blob.sentences), 19)
assert_true(isinstance(blob.sentences[0], tb.Sentence))
    def test_sentences_with_space_before_punctuation(self):
text = "Uh oh. This sentence might cause some problems. : Now we're ok."
b = tb.TextBlob(text)
assert_equal(len(b.sentences), 3)
def test_sentiment_of_foreign_text(self):
blob = tb.TextBlob(u'Nous avons cherch\xe9 un motel dans la r\xe9gion de '
'Madison, mais les motels ne sont pas nombreux et nous avons '
'finalement choisi un Motel 6, attir\xe9s par le bas '
'prix de la chambre.')
assert_true(isinstance(blob.sentiment[0], float))
def test_iter(self):
for i, letter in enumerate(self.short_blob):
assert_equal(letter, self.short[i])
def test_raw_sentences(self):
blob = tb.TextBlob(self.text)
assert_equal(len(blob.raw_sentences), 19)
assert_equal(blob.raw_sentences[0], "Beautiful is better than ugly.")
def test_blob_with_no_sentences(self):
text = "this isn't really a sentence it's just a long string of words"
blob = tb.TextBlob(text)
# the blob just has one sentence
assert_equal(len(blob.sentences), 1)
# the start index is 0, the end index is len(text) - 1
assert_equal(blob.sentences[0].start_index, 0)
assert_equal(blob.sentences[0].end_index, len(text))
def test_len(self):
blob = tb.TextBlob('lorem ipsum')
assert_equal(len(blob), len('lorem ipsum'))
def test_repr(self):
blob1 = tb.TextBlob('lorem ipsum')
if PY2:
assert_equal(repr(blob1), b"TextBlob(\"{0}\")".format(binary_type('lorem ipsum')))
else:
assert_equal(repr(blob1), "TextBlob(\"{0}\")".format('lorem ipsum'))
def test_cmp(self):
blob1 = tb.TextBlob('lorem ipsum')
blob2 = tb.TextBlob('lorem ipsum')
blob3 = tb.TextBlob('dolor sit amet')
assert_true(blob1 == blob2) # test ==
assert_true(blob1 > blob3) # test >
assert_true(blob1 >= blob3) # test >=
assert_true(blob3 < blob2) # test <
assert_true(blob3 <= blob2) # test <=
def test_invalid_comparison(self):
blob = tb.TextBlob("one")
if PY2:
# invalid comparison returns False
assert_false(blob < 2)
else:
# invalid comparison raises Error
with assert_raises(TypeError):
blob < 2
def test_words(self):
blob = tb.TextBlob('Beautiful is better than ugly. '
'Explicit is better than implicit.')
assert_true(isinstance(blob.words, tb.WordList))
assert_equal(blob.words, tb.WordList([
'Beautiful',
'is',
'better',
'than',
'ugly',
'Explicit',
'is',
'better',
'than',
'implicit',
]))
short = tb.TextBlob("Just a bundle of words")
assert_equal(short.words, tb.WordList([
'Just', 'a', 'bundle', 'of', 'words'
]))
def test_words_includes_apostrophes_in_contractions(self):
blob = tb.TextBlob("Let's test this.")
assert_equal(blob.words, tb.WordList(['Let', "'s", "test", "this"]))
blob2 = tb.TextBlob("I can't believe it's not butter.")
assert_equal(blob2.words, tb.WordList(['I', 'ca', "n't", "believe",
'it', "'s", "not", "butter"]))
def test_pos_tags(self):
blob = tb.TextBlob('Simple is better than complex. '
'Complex is better than complicated.')
assert_equal(blob.pos_tags, [
('Simple', 'JJ'),
('is', 'VBZ'),
('better', 'JJR'),
('than', 'IN'),
('complex', 'JJ'),
('Complex', 'NNP'),
('is', 'VBZ'),
('better', 'JJR'),
('than', 'IN'),
('complicated', 'VBN'),
])
def test_tags(self):
assert_equal(self.blob.tags, self.blob.pos_tags)
def test_tagging_nonascii(self):
b = tb.TextBlob('Learn how to make the five classic French mother sauces: '
'Béchamel, Tomato Sauce, Espagnole, Velouté and Hollandaise.')
tags = b.tags
assert_true(isinstance(tags[0][0], unicode))
def test_pos_tags_includes_one_letter_articles(self):
blob = tb.TextBlob("This is a sentence.")
assert_equal(blob.pos_tags[2][0], 'a')
@attr('slow')
def test_np_extractor_defaults_to_fast_tagger(self):
text = "Python is a high-level scripting language."
blob1 = tb.TextBlob(text)
assert_true(isinstance(blob1.np_extractor, FastNPExtractor))
def test_np_extractor_is_shared_among_instances(self):
blob1 = tb.TextBlob("This is one sentence")
blob2 = tb.TextBlob("This is another sentence")
assert_true(blob1.np_extractor is blob2.np_extractor)
@attr('slow')
def test_can_use_different_np_extractors(self):
e = ConllExtractor()
text = "Python is a high-level scripting language."
blob = tb.TextBlob(text)
blob.np_extractor = e
assert_true(isinstance(blob.np_extractor, ConllExtractor))
def test_can_use_different_sentanalyzer(self):
blob = tb.TextBlob("I love this car", analyzer=NaiveBayesAnalyzer())
assert_true(isinstance(blob.analyzer, NaiveBayesAnalyzer))
@attr("slow")
def test_discrete_sentiment(self):
blob = tb.TextBlob("I feel great today.", analyzer=NaiveBayesAnalyzer())
assert_equal(blob.sentiment[0], 'pos')
def test_can_get_subjectivity_and_polarity_with_different_analyzer(self):
blob = tb.TextBlob("I love this car.", analyzer=NaiveBayesAnalyzer())
pattern = PatternAnalyzer()
assert_equal(blob.polarity, pattern.analyze(str(blob))[0])
assert_equal(blob.subjectivity, pattern.analyze(str(blob))[1])
def test_pos_tagger_defaults_to_pattern(self):
blob = tb.TextBlob("some text")
assert_true(isinstance(blob.pos_tagger, PatternTagger))
def test_pos_tagger_is_shared_among_instances(self):
blob1 = tb.TextBlob("This is one sentence")
blob2 = tb.TextBlob("This is another sentence.")
assert_true(blob1.pos_tagger is blob2.pos_tagger)
def test_can_use_different_pos_tagger(self):
tagger = NLTKTagger()
blob = tb.TextBlob("this is some text", pos_tagger=tagger)
assert_true(isinstance(blob.pos_tagger, NLTKTagger))
@attr('slow')
def test_can_pass_np_extractor_to_constructor(self):
e = ConllExtractor()
blob = tb.TextBlob('Hello world!', np_extractor=e)
assert_true(isinstance(blob.np_extractor, ConllExtractor))
def test_getitem(self):
blob = tb.TextBlob('lorem ipsum')
assert_equal(blob[0], 'l')
assert_equal(blob[0:5], tb.TextBlob('lorem'))
def test_upper(self):
blob = tb.TextBlob('lorem ipsum')
assert_true(is_blob(blob.upper()))
assert_equal(blob.upper(), tb.TextBlob('LOREM IPSUM'))
def test_upper_and_words(self):
blob = tb.TextBlob('beautiful is better')
assert_equal(blob.upper().words, tb.WordList(['BEAUTIFUL', 'IS', 'BETTER'
]))
def test_lower(self):
blob = tb.TextBlob('Lorem Ipsum')
assert_true(is_blob(blob.lower()))
assert_equal(blob.lower(), tb.TextBlob('lorem ipsum'))
def test_find(self):
text = 'Beautiful is better than ugly.'
blob = tb.TextBlob(text)
assert_equal(blob.find('better', 5, len(blob)), text.find('better', 5,
len(text)))
def test_rfind(self):
text = 'Beautiful is better than ugly. '
blob = tb.TextBlob(text)
assert_equal(blob.rfind('better'), text.rfind('better'))
def test_startswith(self):
blob = tb.TextBlob(self.text)
assert_true(blob.startswith('Beautiful'))
assert_true(blob.starts_with('Beautiful'))
def test_endswith(self):
blob = tb.TextBlob(self.text)
assert_true(blob.endswith('of those!'))
assert_true(blob.ends_with('of those!'))
def test_split(self):
blob = tb.TextBlob('Beautiful is better')
assert_equal(blob.split(), tb.WordList(['Beautiful', 'is', 'better']))
def test_title(self):
blob = tb.TextBlob('Beautiful is better')
assert_equal(blob.title(), tb.TextBlob('Beautiful Is Better'))
def test_format(self):
blob = tb.TextBlob('1 + 1 = {0}')
assert_equal(blob.format(1 + 1), tb.TextBlob('1 + 1 = 2'))
assert_equal('1 + 1 = {0}'.format(tb.TextBlob('2')), '1 + 1 = 2')
def test_using_indices_for_slicing(self):
blob = tb.TextBlob("Hello world. How do you do?")
sent1, sent2 = blob.sentences
assert_equal(blob[sent1.start:sent1.end], tb.TextBlob(str(sent1)))
assert_equal(blob[sent2.start:sent2.end], tb.TextBlob(str(sent2)))
def test_indices_with_only_one_sentences(self):
blob = tb.TextBlob("Hello world.")
sent1 = blob.sentences[0]
assert_equal(blob[sent1.start:sent1.end], tb.TextBlob(str(sent1)))
def test_indices_with_multiple_puncutations(self):
blob = tb.TextBlob("Hello world. How do you do?! This has an ellipses...")
sent1, sent2, sent3 = blob.sentences
assert_equal(blob[sent2.start:sent2.end], tb.TextBlob("How do you do?!"))
assert_equal(blob[sent3.start:sent3.end], tb.TextBlob("This has an ellipses..."))
def test_indices_short_names(self):
blob = tb.TextBlob(self.text)
last_sentence = blob.sentences[len(blob.sentences) - 1]
assert_equal(last_sentence.start, last_sentence.start_index)
assert_equal(last_sentence.end, last_sentence.end_index)
def test_replace(self):
blob = tb.TextBlob('textblob is a blobby blob')
assert_equal(blob.replace('blob', 'bro'),
tb.TextBlob('textbro is a broby bro'))
assert_equal(blob.replace('blob', 'bro', 1),
tb.TextBlob('textbro is a blobby blob'))
def test_join(self):
l = ['explicit', 'is', 'better']
wl = tb.WordList(l)
assert_equal(tb.TextBlob(' ').join(l), tb.TextBlob('explicit is better'))
assert_equal(tb.TextBlob(' ').join(wl), tb.TextBlob('explicit is better'))
@attr('slow')
def test_blob_noun_phrases(self):
noun_phrases = self.np_test_blob.noun_phrases
assert_true('python' in noun_phrases)
assert_true('design philosophy' in noun_phrases)
def test_word_counts(self):
blob = tb.TextBlob('Buffalo buffalo ate my blue buffalo.')
assert_equal(dict(blob.word_counts), {
'buffalo': 3,
'ate': 1,
'my': 1,
'blue': 1
})
assert_equal(blob.word_counts['buffalo'], 3)
assert_equal(blob.words.count('buffalo'), 3)
assert_equal(blob.words.count('buffalo', case_sensitive=True), 2)
assert_equal(blob.word_counts['blue'], 1)
assert_equal(blob.words.count('blue'), 1)
assert_equal(blob.word_counts['ate'], 1)
assert_equal(blob.words.count('ate'), 1)
assert_equal(blob.word_counts['buff'], 0)
assert_equal(blob.words.count('buff'), 0)
blob2 = tb.TextBlob(self.text)
assert_equal(blob2.words.count('special'), 2)
assert_equal(blob2.words.count('special', case_sensitive=True), 1)
@attr('slow')
def test_np_counts(self):
# Add some text so that we have a noun phrase that
# has a frequency greater than 1
noun_phrases = self.np_test_blob.noun_phrases
assert_equal(noun_phrases.count('python'), 6)
assert_equal(self.np_test_blob.np_counts['python'], noun_phrases.count('python'))
assert_equal(noun_phrases.count('cpython'), 2)
assert_equal(noun_phrases.count('not found'), 0)
def test_add(self):
blob1 = tb.TextBlob('Hello, world! ')
blob2 = tb.TextBlob('Hola mundo!')
# Can add two text blobs
assert_equal(blob1 + blob2, tb.TextBlob('Hello, world! Hola mundo!'))
# Can also add a string to a tb.TextBlob
assert_equal(blob1 + 'Hola mundo!',
tb.TextBlob('Hello, world! Hola mundo!'))
# Or both
assert_equal(blob1 + blob2 + ' Goodbye!',
tb.TextBlob('Hello, world! Hola mundo! Goodbye!'))
# operands must be strings
assert_raises(TypeError, blob1.__add__, ['hello'])
def test_unicode(self):
blob = tb.TextBlob(self.text)
assert_equal(str(blob), str(self.text))
def test_strip(self):
text = 'Beautiful is better than ugly. '
blob = tb.TextBlob(text)
assert_true(is_blob(blob))
assert_equal(blob.strip(), tb.TextBlob(text.strip()))
def test_strip_and_words(self):
blob = tb.TextBlob('Beautiful is better! ')
        assert_equal(blob.strip().words,
                     tb.WordList(['Beautiful', 'is', 'better']))
def test_index(self):
blob = tb.TextBlob(self.text)
assert_equal(blob.index('Namespaces'), self.text.index('Namespaces'))
def test_sentences_after_concatenation(self):
blob1 = tb.TextBlob('Beautiful is better than ugly. ')
blob2 = tb.TextBlob('Explicit is better than implicit.')
concatenated = blob1 + blob2
assert_equal(len(concatenated.sentences), 2)
def test_sentiment(self):
positive = tb.TextBlob('This is the best, most amazing '
'text-processing library ever!')
assert_true(positive.sentiment[0] > 0.0)
negative = tb.TextBlob("bad bad bitches that's my muthufuckin problem.")
assert_true(negative.sentiment[0] < 0.0)
zen = tb.TextBlob(self.text)
assert_equal(round(zen.sentiment[0], 1), 0.2)
def test_subjectivity(self):
positive = tb.TextBlob("Oh my god this is so amazing! I'm so happy!")
assert_true(isinstance(positive.subjectivity, float))
assert_true(positive.subjectivity > 0)
def test_polarity(self):
positive = tb.TextBlob("Oh my god this is so amazing! I'm so happy!")
assert_true(isinstance(positive.polarity, float))
assert_true(positive.polarity > 0)
def test_sentiment_of_emoticons(self):
b1 = tb.TextBlob("Faces have values =)")
b2 = tb.TextBlob("Faces have values")
assert_true(b1.sentiment[0] > b2.sentiment[0])
def test_bad_init(self):
assert_raises(TypeError, lambda: tb.TextBlob(['bad']))
assert_raises(ValueError, lambda: tb.TextBlob("this is fine",
np_extractor="this is not fine"))
assert_raises(ValueError, lambda: tb.TextBlob("this is fine",
pos_tagger="this is not fine"))
def test_in(self):
blob = tb.TextBlob('Beautiful is better than ugly. ')
assert_true('better' in blob)
assert_true('fugly' not in blob)
@attr('slow')
def test_json(self):
blob = tb.TextBlob('Beautiful is better than ugly. ')
assert_equal(blob.json, blob.to_json())
blob_dict = json.loads(blob.json)[0]
assert_equal(blob_dict['stripped'], 'beautiful is better than ugly')
assert_equal(blob_dict['noun_phrases'], blob.sentences[0].noun_phrases)
assert_equal(blob_dict['start_index'], blob.sentences[0].start)
assert_equal(blob_dict['end_index'], blob.sentences[0].end)
assert_almost_equal(blob_dict['polarity'],
blob.sentences[0].polarity, places=4)
assert_almost_equal(blob_dict['subjectivity'],
blob.sentences[0].subjectivity, places=4)
def test_words_are_word_objects(self):
words = self.blob.words
assert_true(isinstance(words[0], tb.Word))
def test_words_have_pos_tags(self):
blob = tb.TextBlob('Simple is better than complex. '
'Complex is better than complicated.')
first_word, first_tag = blob.pos_tags[0]
assert_true(isinstance(first_word, tb.Word))
assert_equal(first_word.pos_tag, first_tag)
def test_tokenizer_defaults_to_word_tokenizer(self):
assert_true(isinstance(self.blob.tokenizer, WordTokenizer))
def test_tokens_property(self):
        assert_equal(self.blob.tokens,
                     tb.WordList(WordTokenizer().tokenize(self.text)))
def test_can_use_an_different_tokenizer(self):
tokenizer = nltk.tokenize.TabTokenizer()
blob = tb.TextBlob("This is\ttext.", tokenizer=tokenizer)
assert_equal(blob.tokens, tb.WordList(["This is", "text."]))
def test_tokenize_method(self):
tokenizer = nltk.tokenize.TabTokenizer()
blob = tb.TextBlob("This is\ttext.")
# If called without arguments, should default to WordTokenizer
assert_equal(blob.tokenize(), tb.WordList(["This", "is", "text", "."]))
# Pass in the TabTokenizer
assert_equal(blob.tokenize(tokenizer), tb.WordList(["This is", "text."]))
@attr("requires_internet")
def test_translate(self):
blob = tb.TextBlob("This is a sentence.")
translated = blob.translate(to="es")
assert_true(isinstance(translated, tb.TextBlob))
assert_equal(translated, "Esta es una frase.")
es_blob = tb.TextBlob("Esta es una frase.")
to_en = es_blob.translate(from_lang="es", to="en")
assert_equal(to_en, "This is a sentence.")
@attr("requires_internet")
def test_translate_non_ascii(self):
blob = tb.TextBlob(unicode("ذات سيادة كاملة"))
translated = blob.translate(from_lang="ar", to="en")
assert_equal(translated, "With full sovereignty")
chinese_blob = tb.TextBlob(unicode("美丽优于丑陋"))
translated = chinese_blob.translate(from_lang="zh-CN", to='en')
assert_equal(translated, "Beautiful is better than ugly")
@attr("requires_internet")
def test_detect(self):
es_blob = tb.TextBlob("Hola")
assert_equal(es_blob.detect_language(), "es")
en_blob = tb.TextBlob("Hello")
assert_equal(en_blob.detect_language(), "en")
@attr("requires_internet")
def test_detect_non_ascii(self):
blob = tb.TextBlob(unicode("ذات سيادة كاملة"))
assert_equal(blob.detect_language(), "ar")
def test_correct(self):
blob = tb.TextBlob("I havv bad speling.")
assert_true(isinstance(blob.correct(), tb.TextBlob))
assert_equal(blob.correct(), tb.TextBlob("I have bad spelling."))
blob2 = tb.TextBlob("I am so exciited!!!")
assert_equal(blob2.correct(), "I am so excited!!!")
blob3 = tb.TextBlob("The meaning of life is 42.0.")
assert_equal(blob3.correct(), "The meaning of life is 42.0.")
blob4 = tb.TextBlob("?")
assert_equal(blob4.correct(), "?")
# From a user-submitted bug
text = "Before you embark on any of this journey, write a quick " + \
"high-level test that demonstrates the slowness. " + \
"You may need to introduce some minimum set of data to " + \
"reproduce a significant enough slowness."
blob5 = tb.TextBlob(text)
assert_equal(blob5.correct(), text)
text = "Word list! :\n" + \
"\t* spelling\n" + \
"\t* well"
blob6 = tb.TextBlob(text)
assert_equal(blob6.correct(), text)
def test_parse(self):
blob = tb.TextBlob("And now for something completely different.")
assert_equal(blob.parse(), PatternParser().parse(blob.string))
def test_passing_bad_init_params(self):
tagger = PatternTagger()
assert_raises(ValueError,
lambda: tb.TextBlob("blah", parser=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", np_extractor=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", tokenizer=tagger))
assert_raises(ValueError,
lambda: tb.TextBlob("blah", analyzer=tagger))
analyzer = PatternAnalyzer
assert_raises(ValueError,
lambda: tb.TextBlob("blah", pos_tagger=analyzer))
def test_classify(self):
blob = tb.TextBlob("This is an amazing library. What an awesome classifier!",
classifier=classifier)
assert_equal(blob.classify(), 'pos')
for s in blob.sentences:
assert_equal(s.classify(), 'pos')
def test_classify_without_classifier(self):
blob = tb.TextBlob("This isn't gonna be good")
assert_raises(NameError,
lambda: blob.classify())
class WordTest(TestCase):
def setUp(self):
self.cat = tb.Word('cat')
self.cats = tb.Word('cats')
def test_init(self):
tb.Word("cat")
assert_true(isinstance(self.cat, tb.Word))
word = tb.Word('cat', 'NN')
assert_equal(word.pos_tag, 'NN')
def test_singularize(self):
singular = self.cats.singularize()
assert_equal(singular, 'cat')
assert_equal(self.cat.singularize(), 'cat')
assert_true(isinstance(self.cat.singularize(), tb.Word))
def test_pluralize(self):
plural = self.cat.pluralize()
assert_equal(self.cat.pluralize(), 'cats')
assert_true(isinstance(plural, tb.Word))
def test_repr(self):
assert_equal(repr(self.cat), repr("cat"))
def test_str(self):
assert_equal(str(self.cat), 'cat')
def test_has_str_methods(self):
assert_equal(self.cat.upper(), "CAT")
assert_equal(self.cat.lower(), "cat")
assert_equal(self.cat[0:2], 'ca')
@attr('requires_internet')
def test_translate(self):
assert_equal(tb.Word("cat").translate(to="es"), "gato")
@attr('requires_internet')
def test_translate_without_from_lang(self):
assert_equal(tb.Word('hola').translate(), tb.Word('hello'))
@attr('requires_internet')
def test_detect_language(self):
assert_equal(tb.Word("bonjour").detect_language(), 'fr')
def test_spellcheck(self):
blob = tb.Word("speling")
suggestions = blob.spellcheck()
assert_equal(suggestions[0][0], "spelling")
def test_spellcheck_special_cases(self):
# Punctuation
assert_equal(tb.Word("!").spellcheck(), [("!", 1.0)])
# Numbers
assert_equal(tb.Word("42").spellcheck(), [("42", 1.0)])
assert_equal(tb.Word("12.34").spellcheck(), [("12.34", 1.0)])
# One-letter words
assert_equal(tb.Word("I").spellcheck(), [("I", 1.0)])
assert_equal(tb.Word("A").spellcheck(), [("A", 1.0)])
assert_equal(tb.Word("a").spellcheck(), [("a", 1.0)])
def test_correct(self):
w = tb.Word('speling')
correct = w.correct()
assert_equal(correct, tb.Word('spelling'))
assert_true(isinstance(correct, tb.Word))
@attr('slow')
def test_lemmatize(self):
w = tb.Word("cars")
assert_equal(w.lemmatize(), "car")
w = tb.Word("wolves")
assert_equal(w.lemmatize(), "wolf")
w = tb.Word("went")
assert_equal(w.lemmatize("v"), "go")
def test_lemma(self):
w = tb.Word("wolves")
assert_equal(w.lemma, "wolf")
w = tb.Word("went", "VBD");
assert_equal(w.lemma, "go")
def test_synsets(self):
w = tb.Word("car")
assert_true(isinstance(w.synsets, (list, tuple)))
assert_true(isinstance(w.synsets[0], Synset))
def test_synsets_with_pos_argument(self):
w = tb.Word("work")
noun_syns = w.get_synsets(pos=wn.NOUN)
for synset in noun_syns:
assert_equal(synset.pos, wn.NOUN)
def test_definitions(self):
w = tb.Word("octopus")
for definition in w.definitions:
print(type(definition))
assert_true(isinstance(definition, basestring))
def test_define(self):
w = tb.Word("hack")
synsets = w.get_synsets(wn.NOUN)
definitions = w.define(wn.NOUN)
assert_equal(len(synsets), len(definitions))
class TestWordnetInterface(TestCase):
def setUp(self):
pass
def test_synset(self):
syn = wn.Synset("dog.n.01")
word = tb.Word("dog")
assert_equal(word.synsets[0], syn)
def test_lemma(self):
lemma = wn.Lemma('eat.v.01.eat')
word = tb.Word("eat")
assert_equal(word.synsets[0].lemmas[0], lemma)
class BlobberTest(TestCase):
def setUp(self):
self.blobber = tb.Blobber() # The default blobber
def test_creates_blobs(self):
blob1 = self.blobber("this is one blob")
assert_true(isinstance(blob1, tb.TextBlob))
blob2 = self.blobber("another blob")
assert_equal(blob1.pos_tagger, blob2.pos_tagger)
def test_default_tagger(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.pos_tagger, PatternTagger))
def test_default_np_extractor(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.np_extractor, FastNPExtractor))
def test_default_tokenizer(self):
blob = self.blobber("Some text")
assert_true(isinstance(blob.tokenizer, WordTokenizer))
def test_str_and_repr(self):
expected = "Blobber(tokenizer=WordTokenizer(), pos_tagger=PatternTagger(), np_extractor=FastNPExtractor(), analyzer=PatternAnalyzer(), parser=PatternParser(), classifier=None)"
assert_equal(repr(self.blobber), expected)
assert_equal(str(self.blobber), repr(self.blobber))
def test_overrides(self):
b = tb.Blobber(tokenizer=SentenceTokenizer(),
np_extractor=ConllExtractor())
blob = b("How now? Brown cow?")
assert_true(isinstance(blob.tokenizer, SentenceTokenizer))
assert_equal(blob.tokens, tb.WordList(["How now?", "Brown cow?"]))
blob2 = b("Another blob")
# blobs have the same tokenizer
assert_true(blob.tokenizer is blob2.tokenizer)
# but aren't the same object
assert_not_equal(blob, blob2)
def test_override_analyzer(self):
b = tb.Blobber(analyzer=NaiveBayesAnalyzer())
blob = b("How now?")
blob2 = b("Brown cow")
assert_true(isinstance(blob.analyzer, NaiveBayesAnalyzer))
assert_true(blob.analyzer is blob2.analyzer)
def test_overrider_classifier(self):
b = tb.Blobber(classifier=classifier)
blob = b("I am so amazing")
assert_equal(blob.classify(), 'pos')
def is_blob(obj):
return isinstance(obj, tb.TextBlob)
if __name__ == '__main__':
main()
|
{
"content_hash": "ea6d9af30f25f3528715ad83d6960f0e",
"timestamp": "",
"source": "github",
"line_count": 1012,
"max_line_length": 291,
"avg_line_length": 38.55632411067194,
"alnum_prop": 0.6069863399882108,
"repo_name": "bbengfort/TextBlob",
"id": "e23473e27352d82e3bb01760e83b556fcd8f1814",
"size": "39096",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_blob.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import tweepy, requests, time
# from cowpy import cow
# cheese = cow.Milk()
class MyStreamListener(tweepy.StreamListener):
def __init__(self):
self.auth = tweepy.OAuthHandler('v8xVBZlXBp2AJoWI3VjDjzDkC', 'Wpoom4N6tpTgfywzCt6y83gvZubbYoT0vL0V8FXzhyXA74218D')
self.auth.set_access_token('734122414158708736-WijNUSxfi85hhqLGnaU8muQqInVugnE', 'PzXToKFTW0qErhvM4WIKerputvx5e0J1EM9aaObn5xNPJ')
self.api = tweepy.API(self.auth)
self.example = u'Polly Help:\n\n4253221077\nThe government is watching us at every moment\n[Canada/Intl]'
def on_direct_message(self, dm):
self.message = dm._json['direct_message']
text = unicode(self.message[u'text'])
inputs = text.split('\n')
user = unicode(self.message['sender']['screen_name'])
if user != u'MessagePolly':
if len(inputs) < 2:
                # give them some help
self.dm(user, self.example)
self.log(u'Sending help to {}'.format(user))
else:
data = { 'number': inputs[0], 'message': inputs[1], 'from': user}
self.log(u'Sending "{message}" to {number} from @{from}'.format(**data))
# check if intl, us, or canada
if len(inputs) == 2:
r = requests.post('http://textbelt.com/text', data=data)
elif inputs[2] is not None:
if inputs[2].lower() == 'canada':
r = requests.post('http://textbelt.com/canada', data=data)
elif inputs[2].lower() == 'intl':
r = requests.post('http://textbelt.com/intl', data=data)
else:
self.log(u'Error: Unrecognized area: {}'.format(inputs[2]))
self.dm(user, u'Error: Unrecognized area: {}'.format(inputs[2]))
                # check success and report an error; the try also covers the case
                # where no request was sent (unrecognized area), which leaves r unset
try:
if r.json()['success']:
                        res = u'Your message ({message}) to {number} has been sent successfully.'.format(**data)
self.log(res)
self.dm(user, res)
else:
error = u'Your text didn\'t send. {}'.format(r.json()['message'])
self.log(error)
self.dm(user, error)
except:
pass
def dm(self, user, dm):
try:
self.api.send_direct_message(user=user, text=dm)
except:
self.log(u'Error: Tried to send "{}..." to {}'.format(dm[0:8], user))
def log(self, m):
text = u"=> " + time.strftime("%Y-%m-%d %H:%M:%S") + u' {}'.format(m)
        logfile = open('log_' + time.strftime("%m-%d-%Y") + '.log', mode='a')
        logfile.write(text.encode('utf8') + '\n')
        logfile.close()
print text
listener = MyStreamListener()
myStream = tweepy.Stream(auth=listener.api.auth, listener=listener)
print 'Running @MessagePolly...'
myStream.userstream()
|
{
"content_hash": "652c08a72d48316dc9aed02e201faca0",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 137,
"avg_line_length": 41.86486486486486,
"alnum_prop": 0.5355067785668173,
"repo_name": "Dabs4Dads/polly",
"id": "6aa95b356f80b4e2fcc8534e09acc9f4443aa0c0",
"size": "3098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3098"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import StreamingHttpResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from guacamole import client
import logging
import uuid
import threading
logger = logging.getLogger(__name__)
sockets = {}
sockets_lock = threading.RLock()
read_lock = threading.RLock()
write_lock = threading.RLock()
pending_read_request = threading.Event()
def index(request):
return render_to_response('index.html',
{},
context_instance=RequestContext(request))
@csrf_exempt
def tunnel(request):
qs = request.META['QUERY_STRING']
logger.info('tunnel %s', qs)
if qs == 'connect':
return _do_connect(request)
else:
tokens = qs.split(':')
if len(tokens) >= 2:
if tokens[0] == 'read':
return _do_read(request, tokens[1])
elif tokens[0] == 'write':
return _do_write(request, tokens[1])
return HttpResponse(status=400)
def _do_connect(request):
# Connect to guacd daemon
guac = client.GuacamoleClient()
guac.connect(protocol='vnc',
hostname='blargh.host.com',
port=5901,
password='password')
# guac.connect(protocol='ssh',
# hostname='blargh.host.com',
# port=22,
# username='user',
# password='password')
cache_key = str(uuid.uuid4())
with sockets_lock:
logger.info('Saving socket with key %s', cache_key)
sockets[cache_key] = guac
response = HttpResponse(content=cache_key)
response['Cache-Control'] = 'no-cache'
return response
def _do_read(request, cache_key):
pending_read_request.set()
def content():
with sockets_lock:
guac = sockets[cache_key]
with read_lock:
pending_read_request.clear()
while True:
content = guac.read()
if content:
yield content
else:
break
if pending_read_request.is_set():
logger.info('Letting another request take over.')
break
# End-of-instruction marker
yield '0.;'
response = StreamingHttpResponse(content(),
content_type='application/octet-stream')
response['Cache-Control'] = 'no-cache'
return response
def _do_write(request, cache_key):
with sockets_lock:
guac = sockets[cache_key]
with write_lock:
while True:
chunk = request.read(8192)
if chunk:
guac.write(chunk)
else:
break
response = HttpResponse(content_type='application/octet-stream')
response['Cache-Control'] = 'no-cache'
return response
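For orientation, here is a minimal sketch of how these views might be mapped in a project's URLconf; the module path and URL patterns are assumptions, not part of the original app, and the style matches the older Django releases this code targets.
# Hedged sketch of a hypothetical urls.py wiring the views above.
from django.conf.urls import url

from guacamole import views

urlpatterns = [
    url(r'^$', views.index),         # serves index.html
    url(r'^tunnel$', views.tunnel),  # handles ?connect, read:<key>, write:<key>
]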
|
{
"content_hash": "e2efcf966b2a4d8e2bdd2b992fc8edb4",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 26.81081081081081,
"alnum_prop": 0.5655241935483871,
"repo_name": "rescale/django-guacamole",
"id": "2a44521366f15049b97dd76067ceb47a6718656e",
"size": "2976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guacamole/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "829"
},
{
"name": "Python",
"bytes": "9739"
}
],
"symlink_target": ""
}
|
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
maxpFormat_0_5 = """
> # big endian
tableVersion: i
numGlyphs: H
"""
maxpFormat_1_0_add = """
> # big endian
maxPoints: H
maxContours: H
maxCompositePoints: H
maxCompositeContours: H
maxZones: H
maxTwilightPoints: H
maxStorage: H
maxFunctionDefs: H
maxInstructionDefs: H
maxStackElements: H
maxSizeOfInstructions: H
maxComponentElements: H
maxComponentDepth: H
"""
class table__m_a_x_p(DefaultTable.DefaultTable):
dependencies = ['glyf']
def decompile(self, data, ttFont):
dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
self.numGlyphs = int(self.numGlyphs)
if self.tableVersion != 0x00005000:
dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
assert len(data) == 0
def compile(self, ttFont):
if 'glyf' in ttFont:
if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
self.recalc(ttFont)
else:
pass # CFF
self.numGlyphs = len(ttFont.getGlyphOrder())
if self.tableVersion != 0x00005000:
self.tableVersion = 0x00010000
data = sstruct.pack(maxpFormat_0_5, self)
if self.tableVersion == 0x00010000:
data = data + sstruct.pack(maxpFormat_1_0_add, self)
return data
def recalc(self, ttFont):
"""Recalculate the font bounding box, and most other maxp values except
for the TT instructions values. Also recalculate the value of bit 1
of the flags field and the font bounding box of the 'head' table.
"""
glyfTable = ttFont['glyf']
hmtxTable = ttFont['hmtx']
headTable = ttFont['head']
self.numGlyphs = len(glyfTable)
INFINITY = 100000
xMin = +INFINITY
yMin = +INFINITY
xMax = -INFINITY
yMax = -INFINITY
maxPoints = 0
maxContours = 0
maxCompositePoints = 0
maxCompositeContours = 0
maxComponentElements = 0
maxComponentDepth = 0
allXMinIsLsb = 1
for glyphName in ttFont.getGlyphOrder():
g = glyfTable[glyphName]
if g.numberOfContours:
if hmtxTable[glyphName][1] != g.xMin:
allXMinIsLsb = 0
xMin = min(xMin, g.xMin)
yMin = min(yMin, g.yMin)
xMax = max(xMax, g.xMax)
yMax = max(yMax, g.yMax)
if g.numberOfContours > 0:
nPoints, nContours = g.getMaxpValues()
maxPoints = max(maxPoints, nPoints)
maxContours = max(maxContours, nContours)
else:
nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
maxCompositePoints = max(maxCompositePoints, nPoints)
maxCompositeContours = max(maxCompositeContours, nContours)
maxComponentElements = max(maxComponentElements, len(g.components))
maxComponentDepth = max(maxComponentDepth, componentDepth)
if xMin == +INFINITY:
headTable.xMin = 0
headTable.yMin = 0
headTable.xMax = 0
headTable.yMax = 0
else:
headTable.xMin = xMin
headTable.yMin = yMin
headTable.xMax = xMax
headTable.yMax = yMax
self.maxPoints = maxPoints
self.maxContours = maxContours
self.maxCompositePoints = maxCompositePoints
self.maxCompositeContours = maxCompositeContours
self.maxComponentElements = maxComponentElements
self.maxComponentDepth = maxComponentDepth
if allXMinIsLsb:
headTable.flags = headTable.flags | 0x2
else:
headTable.flags = headTable.flags & ~0x2
def testrepr(self):
items = sorted(self.__dict__.items())
print(". . . . . . . . .")
for combo in items:
print(" %s: %s" % combo)
print(". . . . . . . . .")
def toXML(self, writer, ttFont):
if self.tableVersion != 0x00005000:
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
if self.tableVersion != 0x00005000:
formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
names = names + names_1_0
for name in names:
value = getattr(self, name)
if name == "tableVersion":
value = hex(value)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
setattr(self, name, safeEval(attrs["value"]))
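As a rough illustration of how the recalculation above is usually exercised, the sketch below opens a TrueType font with fontTools and recomputes maxp (and the head bounding box) before saving; the font paths are placeholders.
# Hedged sketch: trigger recalc() on a real font; paths are hypothetical.
from fontTools.ttLib import TTFont

font = TTFont("example.ttf", recalcBBoxes=True)
font["maxp"].recalc(font)   # recomputes point/contour maxima and the head bbox
font.save("example-recalc.ttf")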
|
{
"content_hash": "f0f222abd22679c7f16f9ccdd2343acf",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 77,
"avg_line_length": 30.369565217391305,
"alnum_prop": 0.684800763540921,
"repo_name": "fonttools/fonttools",
"id": "e810806dcff1aa15e1abe21f885efcc4ca78130e",
"size": "4191",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "Lib/fontTools/ttLib/tables/_m_a_x_p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3522"
},
{
"name": "Makefile",
"bytes": "352"
},
{
"name": "Python",
"bytes": "5442538"
}
],
"symlink_target": ""
}
|
'''
Created on Feb 5, 2015
@author: jeremie
'''
import abc
class CombiningEventMixinMetaclass(type):
"""
Metaclass to allow union of _eventMixin_events attributes of base classes
instead of overwriting them.
"""
def __new__(cls, name, bases, attrs):
_eventMixin_events = set(attrs.get('_eventMixin_events', list()))
for base in bases:
_eventMixin_events.update(getattr(base,'_eventMixin_events', list()))
attrs['_eventMixin_events'] = _eventMixin_events
return type.__new__(cls, name, bases, attrs)
class AbstractCombiningEventMixinMetaclass(abc.ABCMeta):
def __new__(cls, name, bases, attrs):
_eventMixin_events = set(attrs.get('_eventMixin_events', list()))
for base in bases:
_eventMixin_events.update(getattr(base,'_eventMixin_events', list()))
attrs['_eventMixin_events'] = _eventMixin_events
return abc.ABCMeta.__new__(cls, name, bases, attrs)
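A small illustrative sketch (Python 3 class syntax; class and event names are hypothetical) of the combining behaviour: a subclass ends up with the union of its own events and those declared on every base.
# Hedged example of the event-union behaviour.
class BaseEvents(metaclass=CombiningEventMixinMetaclass):
    _eventMixin_events = ['LinkUp']

class MoreEvents(BaseEvents):
    _eventMixin_events = ['LinkDown']

assert MoreEvents._eventMixin_events == {'LinkUp', 'LinkDown'}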
|
{
"content_hash": "0217dfaaadb36f68eff5429560f1e2ca",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 33.81481481481482,
"alnum_prop": 0.6867469879518072,
"repo_name": "jmiserez/sts",
"id": "a66cffd1df87cbbdf1c4fa61fbd09962a5ecec01",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/hb",
"path": "sts/util/revent_mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1167857"
},
{
"name": "Shell",
"bytes": "16594"
}
],
"symlink_target": ""
}
|
import unittest
from cloudbaseinit.utils import crypt
class TestOpenSSLException(unittest.TestCase):
def setUp(self):
self._openssl = crypt.OpenSSLException()
def test_get_openssl_error_msg(self):
expected_error_msg = u'error:00000000:lib(0):func(0):reason(0)'
error_msg = self._openssl._get_openssl_error_msg()
self.assertEqual(expected_error_msg, error_msg)
class TestCryptManager(unittest.TestCase):
def setUp(self):
self._crypt_manager = crypt.CryptManager()
def test_load_ssh_rsa_public_key_invalid(self):
ssh_pub_key = "ssh"
exc = Exception
self.assertRaises(exc, self._crypt_manager.load_ssh_rsa_public_key,
ssh_pub_key)
|
{
"content_hash": "73ee371532c2011add315b5da0b02d1b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 28.576923076923077,
"alnum_prop": 0.65814266487214,
"repo_name": "ader1990/cloudbase-init",
"id": "565f4aeb87ad985cfea8456e224205d5e9f09953",
"size": "1359",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cloudbaseinit/tests/utils/test_crypt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1245243"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def define_flags():
############
# Run mode
############
tf.app.flags.DEFINE_string('run', None, "Which operation to run. [train|inference]")
##########################
# Training parameters
###########################
tf.app.flags.DEFINE_integer('nb_epoch', 400, "Number of epochs")
tf.app.flags.DEFINE_integer('batch_size', 64, "Number of samples per batch.")
tf.app.flags.DEFINE_integer('nb_batch_per_epoch', 500, "Number of batches per epoch")
tf.app.flags.DEFINE_float('learning_rate', 2E-4, "Learning rate used for AdamOptimizer")
tf.app.flags.DEFINE_integer('noise_dim', 100, "Noise dimension for GAN generation")
tf.app.flags.DEFINE_integer('random_seed', 0, "Seed used to initialize rng.")
############################################
# General tensorflow parameters parameters
#############################################
tf.app.flags.DEFINE_bool('use_XLA', False, "Whether to use XLA compiler.")
tf.app.flags.DEFINE_integer('num_threads', 2, "Number of threads to fetch the data")
    tf.app.flags.DEFINE_float('capacity_factor', 32, "Number of batches to store in queue")
##########
# Datasets
##########
tf.app.flags.DEFINE_string('data_format', "NCHW", "Tensorflow image data format.")
tf.app.flags.DEFINE_string('celebA_path', "../../data/raw/img_align_celeba", "Path to celebA images")
tf.app.flags.DEFINE_integer('channels', 3, "Number of channels")
tf.app.flags.DEFINE_float('central_fraction', 0.8, "Central crop as a fraction of total image")
tf.app.flags.DEFINE_integer('img_size', 64, "Image size")
##############
# Directories
##############
tf.app.flags.DEFINE_string('model_dir', '../../models', "Output folder where checkpoints are dumped.")
tf.app.flags.DEFINE_string('log_dir', '../../logs', "Logs for tensorboard.")
tf.app.flags.DEFINE_string('fig_dir', '../../figures', "Where to save figures.")
tf.app.flags.DEFINE_string('raw_dir', '../../data/raw', "Where raw data is saved")
tf.app.flags.DEFINE_string('data_dir', '../../data/processed', "Where processed data is saved")
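A brief, hypothetical sketch of a TF 1.x entry point consuming these flags; the import path and the printed fields are assumptions.
# Hedged sketch: define the flags, let tf.app.run parse argv, then read FLAGS.
import tensorflow as tf
from model.flags import define_flags, FLAGS  # hypothetical import path

def main(_):
    print("epochs=%d batch_size=%d lr=%g"
          % (FLAGS.nb_epoch, FLAGS.batch_size, FLAGS.learning_rate))

if __name__ == "__main__":
    define_flags()
    tf.app.run(main)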
|
{
"content_hash": "5108e4b7e8efa484f6f1f61ca7682359",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 106,
"avg_line_length": 47.869565217391305,
"alnum_prop": 0.6030881017257039,
"repo_name": "tdeboissiere/DeepLearningImplementations",
"id": "f712efe144ad625bebe21fb5d3a4946f8efaa3af",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GAN_tf/src/model/flags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "448200"
},
{
"name": "Shell",
"bytes": "2163"
}
],
"symlink_target": ""
}
|
"""Support for Homekit cameras."""
from __future__ import annotations
from aiohomekit.model.services import ServicesTypes
from homeassistant.components.camera import Camera
from homeassistant.core import callback
from . import KNOWN_DEVICES, AccessoryEntity
class HomeKitCamera(AccessoryEntity, Camera):
"""Representation of a Homekit camera."""
# content_type = "image/jpeg"
def get_characteristic_types(self):
"""Define the homekit characteristics the entity is tracking."""
return []
@property
def state(self):
"""Return the current state of the camera."""
return "idle"
async def async_camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a jpeg with the current camera snapshot."""
return await self._accessory.pairing.image(
self._aid,
width or 640,
height or 480,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit sensors."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_accessory(accessory):
stream_mgmt = accessory.services.first(
service_type=ServicesTypes.CAMERA_RTP_STREAM_MANAGEMENT
)
if not stream_mgmt:
return
info = {"aid": accessory.aid, "iid": stream_mgmt.iid}
async_add_entities([HomeKitCamera(conn, info)], True)
return True
conn.add_accessory_factory(async_add_accessory)
|
{
"content_hash": "53826f8a5e83b2dab1a96ffe901d779d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 72,
"avg_line_length": 29.296296296296298,
"alnum_prop": 0.6485461441213654,
"repo_name": "sander76/home-assistant",
"id": "a0b15087356cdbb4851f35a2d79cd516db22c90e",
"size": "1582",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from django import template
from django.template.defaultfilters import escape
from django.utils.safestring import mark_safe
from workshops.models import TrainingProgress
register = template.Library()
@register.simple_tag
def progress_label(progress):
assert isinstance(progress, TrainingProgress)
if progress.discarded:
additional_label = "dark"
else:
switch = {
"n": "warning",
"f": "danger",
"a": "info",
"p": "success",
}
additional_label = switch[progress.state]
fmt = "badge badge-{}".format(additional_label)
return mark_safe(fmt)
@register.simple_tag
def progress_description(progress):
assert isinstance(progress, TrainingProgress)
text = "{discarded}{state} {type}<br />{evaluated_by}<br />on {day}.{notes}".format(
discarded="discarded " if progress.discarded else "",
state=progress.get_state_display(),
type=progress.requirement,
evaluated_by=(
"evaluated by {}".format(progress.evaluated_by.full_name)
if progress.evaluated_by is not None
else "submitted"
),
day=progress.created_at.strftime("%A %d %B %Y at %H:%M"),
notes="<br />Notes: {}".format(escape(progress.notes))
if progress.notes
else "",
)
text = text[0].upper() + text[1:]
return mark_safe(text)
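Illustrative only: the two simple tags can be exercised directly from a configured Django shell (e.g. manage.py shell), assuming at least one saved TrainingProgress row; in a template the equivalent is loading the library and writing {% progress_label progress %}.
# Hedged example: calling the tag functions directly on a model instance.
from workshops.models import TrainingProgress

progress = TrainingProgress.objects.filter(state="p").first()
if progress is not None:
    print(progress_label(progress))        # e.g. "badge badge-success"
    print(progress_description(progress))  # state, type, evaluator and date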
|
{
"content_hash": "c02a43cb6f85971b0d7e6439205d61fb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 88,
"avg_line_length": 28.755102040816325,
"alnum_prop": 0.6188786373314408,
"repo_name": "pbanaszkiewicz/amy",
"id": "35f8f4662af0f2b0d23489d2884d0175076cdafb",
"size": "1409",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amy/workshops/templatetags/training_progress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
}
|
from ObjectCreationParameters import ObjectCreationParameters
class ResponsibilityParameters(ObjectCreationParameters):
def __init__(self,name):
ObjectCreationParameters.__init__(self)
self.theName = name
def name(self): return self.theName
|
{
"content_hash": "2a4632e8d52d353b1e3ac817fa7e1a69",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 61,
"avg_line_length": 31.875,
"alnum_prop": 0.788235294117647,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "945574a3d2cea43b2221e26ac4b10bee41617ee9",
"size": "1054",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/ResponsibilityParameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def weights_and_biases():
print("Test checks if Deep Learning weights and biases are accessible from R")
covtype = h2o.upload_file(pyunit_utils.locate("smalldata/covtype/covtype.20k.data"))
covtype[54] = covtype[54].asfactor()
dlmodel = H2ODeepLearningEstimator(hidden=[17,191],
epochs=1,
balance_classes=False,
reproducible=True,
seed=1234,
export_weights_and_biases=True)
dlmodel.train(x=list(range(54)),y=54,training_frame=covtype)
print(dlmodel)
weights1 = dlmodel.weights(0)
weights2 = dlmodel.weights(1)
weights3 = dlmodel.weights(2)
biases1 = dlmodel.biases(0)
biases2 = dlmodel.biases(1)
biases3 = dlmodel.biases(2)
w1c = weights1.ncol
w1r = weights1.nrow
assert w1c == 52, "wrong dimensionality! expected {0}, but got {1}.".format(52, w1c)
assert w1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w1r)
w2c = weights2.ncol
w2r = weights2.nrow
assert w2c == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w2c)
assert w2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w2r)
w3c = weights3.ncol
w3r = weights3.nrow
assert w3c == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w3c)
assert w3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, w3r)
b1c = biases1.ncol
b1r = biases1.nrow
assert b1c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b1c)
assert b1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, b1r)
b2c = biases2.ncol
b2r = biases2.nrow
assert b2c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b2c)
assert b2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, b2r)
b3c = biases3.ncol
b3r = biases3.nrow
assert b3c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b3c)
assert b3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, b3r)
df = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris.csv"))
dl1 = H2ODeepLearningEstimator(hidden=[10,10], export_weights_and_biases=True)
dl1.train(x=list(range(4)), y=4, training_frame=df)
p1 = dl1.predict(df)
ll1 = dl1.model_performance(df).logloss()
print(ll1)
## get weights and biases
w1 = dl1.weights(0)
w2 = dl1.weights(1)
w3 = dl1.weights(2)
b1 = dl1.biases(0)
b2 = dl1.biases(1)
b3 = dl1.biases(2)
## make a model from given weights/biases
dl2 = H2ODeepLearningEstimator(hidden=[10,10], initial_weights=[w1, w2, w3], initial_biases=[b1, b2, b3], epochs=0)
dl2.train(x=list(range(4)), y=4, training_frame=df)
p2 = dl2.predict(df)
ll2 = dl2.model_performance(df).logloss()
print(ll2)
# h2o.download_pojo(dl2) ## fully functional pojo
## check consistency
assert abs(p1[:,1:4]-p2[:,1:4]).max() < 1e-6
assert abs(ll2 - ll1) < 1e-6
## make another model with partially set weights/biases
dl3 = H2ODeepLearningEstimator(hidden=[10,10], initial_weights=[w1, None, w3], initial_biases=[b1, b2, None], epochs=10)
dl3.train(x=list(range(4)), y=4, training_frame=df)
ll3 = dl3.model_performance(df).logloss()
## make another model with partially set user-modified weights/biases
dl4 = H2ODeepLearningEstimator(hidden=[10,10], initial_weights=[w1*1.1,w2*0.9,w3.sqrt()], initial_biases=[b1, b2, None], epochs=10)
dl4.train(x=list(range(4)), y=4, training_frame=df)
ll4 = dl4.model_performance(df).logloss()
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_and_biases)
else:
weights_and_biases()
|
{
"content_hash": "31847d8996c9c9eeacdf9ddbd8170dee",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 133,
"avg_line_length": 37.12149532710281,
"alnum_prop": 0.6555891238670695,
"repo_name": "spennihana/h2o-3",
"id": "b093555200b3d49f6f14eb3437a45b90a742b53e",
"size": "3972",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_biases_deeplearning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "257122"
},
{
"name": "CoffeeScript",
"bytes": "273112"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "125187"
},
{
"name": "HTML",
"bytes": "2111506"
},
{
"name": "Java",
"bytes": "9481047"
},
{
"name": "JavaScript",
"bytes": "87944"
},
{
"name": "Jupyter Notebook",
"bytes": "6165027"
},
{
"name": "Makefile",
"bytes": "42233"
},
{
"name": "Python",
"bytes": "4982123"
},
{
"name": "R",
"bytes": "2699289"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "32768"
},
{
"name": "Shell",
"bytes": "179758"
},
{
"name": "TeX",
"bytes": "657375"
}
],
"symlink_target": ""
}
|
from __future__ import division
import logging
import os
import subprocess
import shutil
import sys
import tempfile
try:
import numpy as np
except ImportError:
logging.critical('Cannot import the third-party Python package numpy')
sys.exit(1)
from . import signal_processing
class ExternalVad(object):
def __init__(self, path_to_binary, name):
"""Args:
path_to_binary: path to binary that accepts '-i <wav>', '-o
<float probabilities>'. There must be one float value per
10ms audio
name: a name to identify the external VAD. Used for saving
the output as extvad_output-<name>.
"""
self._path_to_binary = path_to_binary
self.name = name
assert os.path.exists(self._path_to_binary), (
self._path_to_binary)
self._vad_output = None
def Run(self, wav_file_path):
_signal = signal_processing.SignalProcessingUtils.LoadWav(wav_file_path)
if _signal.channels != 1:
raise NotImplementedError('Multiple-channel'
' annotations not implemented')
if _signal.frame_rate != 48000:
raise NotImplementedError('Frame rates '
'other than 48000 not implemented')
tmp_path = tempfile.mkdtemp()
try:
output_file_path = os.path.join(
tmp_path, self.name + '_vad.tmp')
subprocess.call([
self._path_to_binary,
'-i', wav_file_path,
'-o', output_file_path
])
self._vad_output = np.fromfile(output_file_path, np.float32)
except Exception as e:
logging.error('Error while running the ' + self.name +
' VAD (' + e.message + ')')
finally:
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
def GetVadOutput(self):
assert self._vad_output is not None
return self._vad_output
@classmethod
def ConstructVadDict(cls, vad_paths, vad_names):
external_vads = {}
for path, name in zip(vad_paths, vad_names):
external_vads[name] = ExternalVad(path, name)
return external_vads
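A short usage sketch; the binary path, VAD name and WAV filename are placeholders, and the binary must accept the -i/-o contract described in the constructor docstring.
# Hedged example of driving ExternalVad end to end.
vads = ExternalVad.ConstructVadDict(
    vad_paths=['/usr/local/bin/example_vad'],
    vad_names=['example_vad'])
vads['example_vad'].Run('speech_48k_mono.wav')
probabilities = vads['example_vad'].GetVadOutput()  # one float per 10 ms frame
print(len(probabilities))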
|
{
"content_hash": "90a3f5ac3f0d1a839c24970ae115f42c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 30.057971014492754,
"alnum_prop": 0.6258437801350049,
"repo_name": "koobonil/Boss2D",
"id": "01418d84fe0d69dbb0a398d3901831da1bd5072f",
"size": "2468",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Boss2D/addon/webrtc-jumpingyang001_for_boss/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4820445"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "89930"
},
{
"name": "C",
"bytes": "119747922"
},
{
"name": "C#",
"bytes": "87505"
},
{
"name": "C++",
"bytes": "272329620"
},
{
"name": "CMake",
"bytes": "1199656"
},
{
"name": "CSS",
"bytes": "42679"
},
{
"name": "Clojure",
"bytes": "1487"
},
{
"name": "Cuda",
"bytes": "1651996"
},
{
"name": "DIGITAL Command Language",
"bytes": "239527"
},
{
"name": "Dockerfile",
"bytes": "9638"
},
{
"name": "Emacs Lisp",
"bytes": "15570"
},
{
"name": "Go",
"bytes": "858185"
},
{
"name": "HLSL",
"bytes": "3314"
},
{
"name": "HTML",
"bytes": "2958385"
},
{
"name": "Java",
"bytes": "2921052"
},
{
"name": "JavaScript",
"bytes": "178190"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "M4",
"bytes": "775724"
},
{
"name": "MATLAB",
"bytes": "74606"
},
{
"name": "Makefile",
"bytes": "3941551"
},
{
"name": "Meson",
"bytes": "2847"
},
{
"name": "Module Management System",
"bytes": "2626"
},
{
"name": "NSIS",
"bytes": "4505"
},
{
"name": "Objective-C",
"bytes": "4090702"
},
{
"name": "Objective-C++",
"bytes": "1702390"
},
{
"name": "PHP",
"bytes": "3530"
},
{
"name": "Perl",
"bytes": "11096338"
},
{
"name": "Perl 6",
"bytes": "11802"
},
{
"name": "PowerShell",
"bytes": "38571"
},
{
"name": "Python",
"bytes": "24123805"
},
{
"name": "QMake",
"bytes": "18188"
},
{
"name": "Roff",
"bytes": "1261269"
},
{
"name": "Ruby",
"bytes": "5890"
},
{
"name": "Scala",
"bytes": "5683"
},
{
"name": "Shell",
"bytes": "2879948"
},
{
"name": "TeX",
"bytes": "243507"
},
{
"name": "TypeScript",
"bytes": "1593696"
},
{
"name": "Verilog",
"bytes": "1215"
},
{
"name": "Vim Script",
"bytes": "3759"
},
{
"name": "Visual Basic",
"bytes": "16186"
},
{
"name": "eC",
"bytes": "9705"
}
],
"symlink_target": ""
}
|
import cpp11_initializer_list_extend
c = cpp11_initializer_list_extend.Container([10, 20, 30, 40])
|
{
"content_hash": "8d3c08712b56b7263a514e7196339117",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 61,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.76,
"repo_name": "DEKHTIARJonathan/BilletterieUTC",
"id": "285273efaabac6200db64fe5f0e9ad734f8604d4",
"size": "100",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/cpp11_initializer_list_extend_runme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "505"
},
{
"name": "C",
"bytes": "1489570"
},
{
"name": "C#",
"bytes": "323243"
},
{
"name": "C++",
"bytes": "2646678"
},
{
"name": "CSS",
"bytes": "1309792"
},
{
"name": "Common Lisp",
"bytes": "13780"
},
{
"name": "D",
"bytes": "260374"
},
{
"name": "DIGITAL Command Language",
"bytes": "16078"
},
{
"name": "Forth",
"bytes": "2411"
},
{
"name": "Go",
"bytes": "95670"
},
{
"name": "Groff",
"bytes": "17548"
},
{
"name": "HTML",
"bytes": "8474268"
},
{
"name": "Java",
"bytes": "517584"
},
{
"name": "JavaScript",
"bytes": "1574272"
},
{
"name": "Limbo",
"bytes": "2902"
},
{
"name": "Lua",
"bytes": "103853"
},
{
"name": "M",
"bytes": "58261"
},
{
"name": "Makefile",
"bytes": "193313"
},
{
"name": "Mathematica",
"bytes": "113"
},
{
"name": "Matlab",
"bytes": "49071"
},
{
"name": "Mercury",
"bytes": "4136"
},
{
"name": "OCaml",
"bytes": "25948"
},
{
"name": "Objective-C",
"bytes": "9721"
},
{
"name": "PHP",
"bytes": "336290"
},
{
"name": "Perl",
"bytes": "140021"
},
{
"name": "Perl6",
"bytes": "6403"
},
{
"name": "Pike",
"bytes": "6601"
},
{
"name": "Python",
"bytes": "271706"
},
{
"name": "R",
"bytes": "6053"
},
{
"name": "Ruby",
"bytes": "129514"
},
{
"name": "SQLPL",
"bytes": "10237"
},
{
"name": "Scheme",
"bytes": "81765"
},
{
"name": "Scilab",
"bytes": "84725"
},
{
"name": "Shell",
"bytes": "86284"
},
{
"name": "Standard ML",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "38028"
},
{
"name": "Yacc",
"bytes": "211262"
}
],
"symlink_target": ""
}
|
import string
from urllib import urlencode
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import commonware.log
from oauthlib import oauth1
from oauthlib.common import safe_string_equals
from amo.decorators import login_required
from amo.utils import urlparams
from mkt.api.models import Access, Nonce, Token, REQUEST_TOKEN, ACCESS_TOKEN
DUMMY_CLIENT_KEY = u'DummyOAuthClientKeyString'
DUMMY_TOKEN = u'DummyOAuthToken'
DUMMY_SECRET = u'DummyOAuthSecret'
log = commonware.log.getLogger('z.api')
class OAuthServer(oauth1.Server):
safe_characters = set(string.printable)
nonce_length = (7, 128)
access_token_length = (8, 128)
request_token_length = (8, 128)
verifier_length = (8, 128)
client_key_length = (8, 128)
enforce_ssl = False # SSL enforcement is handled by ops. :-)
def validate_client_key(self, key):
self.attempted_key = key
return Access.objects.filter(key=key).exists()
def get_client_secret(self, key):
# This method returns a dummy secret on failure so that auth
# success and failure take a codepath with the same run time,
# to prevent timing attacks.
try:
# OAuthlib needs unicode objects, django-aesfield returns a string.
return Access.objects.get(key=key).secret.decode('utf8')
except Access.DoesNotExist:
return DUMMY_SECRET
@property
def dummy_client(self):
return DUMMY_CLIENT_KEY
@property
def dummy_request_token(self):
return DUMMY_TOKEN
@property
def dummy_access_token(self):
return DUMMY_TOKEN
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request_token=None, access_token=None):
n, created = Nonce.objects.safer_get_or_create(
defaults={'client_key': client_key},
nonce=nonce, timestamp=timestamp,
request_token=request_token,
access_token=access_token)
return created
def validate_requested_realm(self, client_key, realm):
return True
def validate_realm(self, client_key, access_token, uri=None,
required_realm=None):
return True
def validate_redirect_uri(self, client_key, redirect_uri):
return True
def validate_request_token(self, client_key, request_token):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
return Token.objects.filter(token_type=REQUEST_TOKEN,
creds__key=client_key,
key=request_token).exists()
def validate_access_token(self, client_key, access_token):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
return Token.objects.filter(token_type=ACCESS_TOKEN,
creds__key=client_key,
key=access_token).exists()
def validate_verifier(self, client_key, request_token, verifier):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, token_type=REQUEST_TOKEN)
candidate = t.verifier
except Token.DoesNotExist:
candidate = ''
return safe_string_equals(candidate, verifier)
def get_request_token_secret(self, client_key, request_token):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, creds__key=client_key,
token_type=REQUEST_TOKEN)
return t.secret
except Token.DoesNotExist:
return DUMMY_SECRET
def get_access_token_secret(self, client_key, request_token):
# This method must take the same amount of time/db lookups for
# success and failure to prevent timing attacks.
try:
t = Token.objects.get(key=request_token, creds__key=client_key,
token_type=ACCESS_TOKEN)
except Token.DoesNotExist:
return DUMMY_SECRET
return t.secret
@csrf_exempt
def access_request(request):
oa = OAuthServer()
try:
valid, oauth_request = oa.verify_access_token_request(
request.build_absolute_uri(),
request.method,
request.body,
{'Authorization': request.META.get('HTTP_AUTHORIZATION'),
'Content-Type': request.META.get('CONTENT_TYPE')
})
except ValueError:
valid = False
if valid:
req_t = Token.objects.get(
token_type=REQUEST_TOKEN,
key=oauth_request.resource_owner_key)
t = Token.generate_new(
token_type=ACCESS_TOKEN,
creds=req_t.creds,
user=req_t.user)
# Clean up as we go.
req_t.delete()
return HttpResponse(
urlencode({'oauth_token': t.key,
'oauth_token_secret': t.secret}),
content_type='application/x-www-form-urlencoded')
else:
log.error('Invalid OAuth request for acquiring access token')
return HttpResponse(status=401)
@csrf_exempt
def token_request(request):
oa = OAuthServer()
try:
valid, oauth_request = oa.verify_request_token_request(
request.build_absolute_uri(),
request.method,
request.body,
{'Authorization': request.META.get('HTTP_AUTHORIZATION'),
'Content-Type': request.META.get('CONTENT_TYPE')
})
except ValueError:
valid = False
if valid:
consumer = Access.objects.get(key=oauth_request.client_key)
t = Token.generate_new(token_type=REQUEST_TOKEN, creds=consumer)
return HttpResponse(
urlencode({'oauth_token': t.key,
'oauth_token_secret': t.secret,
'oauth_callback_confirmed': True}),
content_type='application/x-www-form-urlencoded')
else:
log.error('Invalid OAuth request for acquiring request token')
return HttpResponse(status=401)
@csrf_exempt
@login_required
def authorize(request):
if request.method == 'GET' and 'oauth_token' in request.GET:
try:
t = Token.objects.get(token_type=REQUEST_TOKEN,
key=request.GET['oauth_token'])
except Token.DoesNotExist:
log.error('Invalid OAuth request for obtaining user authorization')
return HttpResponse(status=401)
return render(request, 'developers/oauth_authorize.html',
{'app_name': t.creds.app_name,
'oauth_token': request.GET['oauth_token']})
elif request.method == 'POST':
token = request.POST.get('oauth_token')
try:
t = Token.objects.get(token_type=REQUEST_TOKEN,
key=token)
except Token.DoesNotExist:
return HttpResponse(status=401)
if 'grant' in request.POST:
t.user = request.user
t.save()
return HttpResponseRedirect(
urlparams(t.creds.redirect_uri, oauth_token=token,
oauth_verifier=t.verifier))
elif 'deny' in request.POST:
t.delete()
return HttpResponse(status=200)
else:
log.error('Invalid OAuth request for user access authorization')
return HttpResponse(status=401)
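For context, a client of these endpoints signs its calls with OAuth 1.0a; the sketch below uses oauthlib's Client for the request-token step. The endpoint URL, keys and callback are placeholders, not values from this codebase.
# Hedged sketch of the client side of the request-token step (1 of 3).
import requests
from oauthlib import oauth1

client = oauth1.Client(u'consumer-key', client_secret=u'consumer-secret',
                       callback_uri=u'https://example.com/callback')
uri, headers, body = client.sign(u'https://marketplace.example.com/oauth/token/',
                                 http_method=u'POST')
resp = requests.post(uri, data=body, headers=headers)
print(resp.content)  # oauth_token=...&oauth_token_secret=...&oauth_callback_confirmed=True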
|
{
"content_hash": "ff0cbebb282d16a67e31acb4aee640cd",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 79,
"avg_line_length": 37.08490566037736,
"alnum_prop": 0.6087509539557364,
"repo_name": "robhudson/zamboni",
"id": "834469d6b9f22ad29fe2a65059868b3fab5ab887",
"size": "7862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mkt/api/oauth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "900136"
},
{
"name": "JavaScript",
"bytes": "1700376"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6317591"
},
{
"name": "Shell",
"bytes": "20633"
}
],
"symlink_target": ""
}
|
"""TensorFlow Lite tooling helper functionality.
EXPERIMENTAL: APIs here are unstable and likely to change without notice.
@@toco_convert
@@toco_convert_protos
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import tempfile
from tensorflow.contrib.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.contrib.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.contrib.lite.toco.python.tensorflow_wrap_toco import TocoConvert as _toco_convert_protos
from tensorflow.python.framework import dtypes as _dtypes
# from tensorflow.python.platform import
# resource_loader as _resource_loader
# Enum types from the protobuf promoted to the API
FLOAT = _toco_flags_pb2.FLOAT
INT32 = _toco_flags_pb2.INT32
INT64 = _toco_flags_pb2.INT64
STRING = _toco_flags_pb2.STRING
QUANTIZED_UINT8 = _toco_flags_pb2.QUANTIZED_UINT8
TENSORFLOW_GRAPHDEF = _toco_flags_pb2.TENSORFLOW_GRAPHDEF
TFLITE = _toco_flags_pb2.TFLITE
GRAPHVIZ_DOT = _toco_flags_pb2.GRAPHVIZ_DOT
# Conversion currently calls the TOCO API directly (controlled by the flag
# below); the subprocess-based fallback that shelled to another python process
# to protect against crashes is kept, commented out, further down.
EXPERIMENTAL_USE_TOCO_API_DIRECTLY = True
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
# toco_from_proto_bin = _resource_loader.get_path_to_datafile(
# "../toco/python/toco_from_protos")
# if not os.path.exists(toco_from_proto_bin):
# toco_from_proto_bin = "toco_from_protos"
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
  Unless you know what you are doing, consider using
  the more friendly @{tf.contrib.lite.toco_convert}.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
return _toco_convert_protos(model_flags_str, toco_flags_str, input_data_str)
# with tempfile.NamedTemporaryFile() as fp_toco, \
# tempfile.NamedTemporaryFile() as fp_model, \
# tempfile.NamedTemporaryFile() as fp_input, \
# tempfile.NamedTemporaryFile() as fp_output:
# fp_model.write(model_flags_str)
# fp_toco.write(toco_flags_str)
# fp_input.write(input_data_str)
# fp_model.flush()
# fp_toco.flush()
# fp_input.flush()
# cmd = [
# toco_from_proto_bin, fp_model.name, fp_toco.name, fp_input.name,
# fp_output.name
# ]
# cmdline = " ".join(cmd)
# proc = subprocess.Popen(
# cmdline,
# shell=True,
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# close_fds=True)
# stdout, stderr = proc.communicate()
# exitcode = proc.returncode
# if exitcode == 0:
# stuff = fp_output.read()
# return stuff
# else:
# raise RuntimeError("TOCO failed see console for info.\n%s\n%s\n" %
# (stdout, stderr))
def _tensor_name(x):
return x.name.split(":")[0]
def toco_convert(input_data,
input_tensors,
output_tensors,
inference_type=FLOAT,
input_format=TENSORFLOW_GRAPHDEF,
output_format=TFLITE,
quantized_input_stats=None,
drop_control_dependency=True):
"""Convert a model using TOCO from `input_format` to `output_format`.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_data: Input data (i.e. often `sess.graph_def`).
input_tensors: List of input tensors. Type and shape are computed using
`foo.get_shape()` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Currently must be `{FLOAT, QUANTIZED_UINT8}`.
input_format: Type of data to read (currently must be TENSORFLOW_GRAPHDEF).
output_format: Type of data to write (currently must be TFLITE or
GRAPHVIZ_DOT)
quantized_input_stats: For each member of input_tensors the mean and
std deviation of training data. Only needed if `inference_type` is
`QUANTIZED_UINT8`.
drop_control_dependency: Drops control dependencies silently. This is due
to tf lite not supporting control dependencies.
Returns:
The converted data. For example if tflite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
ValueError: If the input tensor type is unknown
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
model = _model_flags_pb2.ModelFlags()
model.drop_control_dependency = drop_control_dependency
toco.inference_type = inference_type
for idx, input_tensor in enumerate(input_tensors):
if input_tensor.dtype == _dtypes.float32:
tflite_input_type = FLOAT
elif input_tensor.dtype == _dtypes.int32:
tflite_input_type = INT32
elif input_tensor.dtype == _dtypes.int64:
tflite_input_type = INT64
# TODO(aselle): Insert strings when they are available
else:
raise ValueError("Tensors %s not known type %r" % (input_tensor.name,
input_tensor.dtype))
input_array = model.input_arrays.add()
if inference_type == QUANTIZED_UINT8:
if tflite_input_type == FLOAT:
tflite_input_type = QUANTIZED_UINT8
input_array.mean, input_array.std = quantized_input_stats[idx]
input_array.name = _tensor_name(input_tensor)
input_array.shape.extend(map(int, input_tensor.get_shape()))
toco.input_types.append(tflite_input_type)
for output_tensor in output_tensors:
model.output_arrays.append(_tensor_name(output_tensor))
data = toco_convert_protos(model.SerializeToString(),
toco.SerializeToString(),
input_data.SerializeToString())
return data
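# A minimal usage sketch (illustrative only; not part of the original module).
# The graph, session and tensor names below are hypothetical and assume a
# trivial float model built with standard TensorFlow 1.x ops:
#
#   import tensorflow as tf
#
#   img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
#   out = tf.identity(img, name="out")
#   with tf.Session() as sess:
#     tflite_model = toco_convert(sess.graph_def, [img], [out])
#   with open("converted_model.tflite", "wb") as f:
#     f.write(tflite_model)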
# remove_undocumented(__name__)
del os
del subprocess
del tempfile
|
{
"content_hash": "04f67e7921010abaf88ef510ddc186bc",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 104,
"avg_line_length": 36.821621621621624,
"alnum_prop": 0.6861421021726365,
"repo_name": "horance-liu/tensorflow",
"id": "5e8edbb93767c67e3a6cc96f5a76a945d92cf2ab",
"size": "7501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/lite/python/lite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8572"
},
{
"name": "C",
"bytes": "314095"
},
{
"name": "C++",
"bytes": "34056582"
},
{
"name": "CMake",
"bytes": "212134"
},
{
"name": "Go",
"bytes": "1005949"
},
{
"name": "Java",
"bytes": "533059"
},
{
"name": "Jupyter Notebook",
"bytes": "1940739"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44794"
},
{
"name": "Objective-C",
"bytes": "8665"
},
{
"name": "Objective-C++",
"bytes": "75338"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "29848838"
},
{
"name": "Ruby",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "401557"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20160528_0900'),
]
operations = [
migrations.AddField(
model_name='comment',
name='name',
field=models.CharField(default='Anonymous', max_length=50),
),
migrations.AlterField(
model_name='comment',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date Created'),
),
migrations.AlterField(
model_name='comment',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='comment',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='comment',
name='status',
field=models.CharField(choices=[('published', 'Published'), ('hidden', 'Hidden'), ('deleted', 'Deleted')], default='hidden', max_length=50),
),
migrations.AlterField(
model_name='comment',
name='url',
field=models.URLField(blank=True),
),
migrations.AlterField(
model_name='post',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date Created'),
),
migrations.AlterField(
model_name='post',
name='published',
field=models.DateTimeField(auto_now=True, verbose_name='Date Published'),
),
migrations.AlterField(
model_name='post',
name='status',
field=models.CharField(choices=[('draft', 'Draft'), ('published', 'Published'), ('hidden', 'Hidden'), ('deleted', 'Deleted')], default='draft', max_length=50),
),
migrations.AlterField(
model_name='tag',
name='frequency',
field=models.IntegerField(default=0),
),
]
|
{
"content_hash": "cde22a53c5df27ea52098abfbfbb8b5b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 171,
"avg_line_length": 34.92307692307692,
"alnum_prop": 0.5629955947136563,
"repo_name": "rocity/dj-blog",
"id": "b7927a60b399cc3e1ba9679b7c204b8742c3c86b",
"size": "2342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0003_auto_20160601_0903.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "831"
},
{
"name": "HTML",
"bytes": "19462"
},
{
"name": "JavaScript",
"bytes": "1244"
},
{
"name": "Python",
"bytes": "18967"
}
],
"symlink_target": ""
}
|
import json
from django_webtest import WebTest
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.renderers import JSONRenderer
from cellcounter.main.models import CellType
from .defaults import MOCK_KEYBOARD, DEFAULT_KEYBOARD_STRING
from .factories import UserFactory, KeyboardFactory, KeyMapFactory
from .models import Keyboard
from .serializers import KeyboardSerializer
class KeyboardTestCase(TestCase):
def test_unicode(self):
keyboard = KeyboardFactory(user__username='alpha', label='alpha')
self.assertEqual(keyboard.__unicode__(), 'Keyboard alpha for alpha')
def test_set_primary_self(self):
keyboard = KeyboardFactory(add_maps=False)
keyboard.set_primary()
self.assertTrue(Keyboard.objects.get(id=keyboard.id).is_primary)
def test_set_primary_other(self):
user = UserFactory()
keyboard1 = KeyboardFactory(user=user, is_primary=True, add_maps=False)
keyboard2 = KeyboardFactory(user=user, add_maps=False)
self.assertTrue(keyboard1.is_primary)
self.assertFalse(Keyboard.objects.get(id=keyboard2.id).is_primary)
keyboard2.set_primary()
self.assertTrue(Keyboard.objects.get(id=keyboard2.id).is_primary)
self.assertFalse(Keyboard.objects.get(id=keyboard1.id).is_primary)
def test_delete_no_primary(self):
keyboard = KeyboardFactory(add_maps=False)
keyboard.delete()
self.assertEqual(len(Keyboard.objects.all()), 0)
def test_delete_change_primary(self):
user = UserFactory()
keyboard1 = KeyboardFactory(user=user, is_primary=True, add_maps=False)
keyboard2 = KeyboardFactory(user=user, add_maps=False)
keyboard1.delete()
self.assertEqual(len(Keyboard.objects.all()), 1)
self.assertTrue(Keyboard.objects.get(id=keyboard2.id).is_primary)
def test_set_keymaps(self):
user = UserFactory()
keyboard = KeyboardFactory(user=user, is_primary=True)
number_old_maps = len(keyboard.mappings.all())
new_maps = [KeyMapFactory(cellid=CellType.objects.get(id=1))]
keyboard.set_keymaps(new_maps)
self.assertNotEqual(number_old_maps, len(keyboard.mappings.all()))
self.assertEqual(len(new_maps), len(keyboard.mappings.all()))
class DefaultKeyboardAPITest(WebTest):
csrf_checks = False
def setUp(self):
self.user = UserFactory()
self.keyboard = KeyboardFactory(is_primary=True, user=self.user)
def test_get_keyboard_anon(self):
response = self.app.get(reverse('default-keyboard'))
self.assertEqual(response.body, DEFAULT_KEYBOARD_STRING)
def test_get_no_primary(self):
user = UserFactory()
response = self.app.get(reverse('default-keyboard'), user=user.username)
self.assertEqual(DEFAULT_KEYBOARD_STRING, response.body)
def test_get_primary_set(self):
response = self.app.get(reverse('default-keyboard'), user=self.user.username)
serializer = KeyboardSerializer(self.keyboard)
self.assertEqual(JSONRenderer().render(serializer.data), response.body)
class KeyboardsListCreateAPITest(WebTest):
csrf_checks = False
def setUp(self):
self.user = UserFactory()
self.keyboard = KeyboardFactory(is_primary=True, user=self.user)
def test_get_anon_empty(self):
response = self.app.get(reverse('keyboards'), status=403)
self.assertEqual(response.status_code, 403)
def test_get_user_kb_list(self):
response = self.app.get(reverse('keyboards'), user=self.user)
queryset = Keyboard.objects.filter(user=self.user)
serializer = KeyboardSerializer(queryset, many=True)
self.assertEqual(JSONRenderer().render(serializer.data), response.body)
def test_post_keyboard_logged_out(self):
response = self.app.post(reverse('keyboards'), MOCK_KEYBOARD, status=403)
self.assertEqual(response.status_code, 403)
def test_post_keyboard_logged_in(self):
response = self.app.post(reverse('keyboards'),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=201)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(Keyboard.objects.filter(user=self.user)), 2)
def test_post_keyboard_missing_fields(self):
response = self.app.post(reverse('keyboards'),
json.dumps({k: v for k, v in
MOCK_KEYBOARD.iteritems() if k != 'label'}),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=400)
self.assertEqual(response.body, '{"label":["This field is required."]}')
self.assertEqual(response.status_code, 400)
def test_post_keyboard_missing_mappings(self):
response = self.app.post(reverse('keyboards'),
json.dumps({k: v for k, v in
MOCK_KEYBOARD.iteritems() if k != 'mappings'}),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=400)
self.assertEqual('{"mappings":["This field is required."]}', response.body)
self.assertEqual(response.status_code, 400)
class KeyboardAPITest(WebTest):
csrf_checks = False
def setUp(self):
self.user = UserFactory()
self.keyboard = KeyboardFactory(is_primary=True, user=self.user)
def test_get_keyboard_detail_anon(self):
response = self.app.get(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
status=403)
self.assertEqual(response.status_code, 403)
def test_get_anothers_keyboard(self):
user = UserFactory()
response = self.app.get(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
user=user.username, status=404)
self.assertEqual(response.status_code, 404)
def get_own_keyboard_detail(self):
response = self.app.get(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
user=self.user.username)
serializer = KeyboardSerializer(self.keyboard)
self.assertEqual(response.body, JSONRenderer().render(serializer.data))
def get_nonexistent_keyboard_detail(self):
response = self.app.get(reverse('keyboard-detail',
kwargs={'keyboard_id': 99}),
user=self.user.username,
status=404)
self.assertEqual(response.status_code, 404)
def test_post_keyboard_detail_fails(self):
response = self.app.post(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=405)
self.assertEqual(response.status_code, 405)
def test_put_own_keyboard_logged_in(self):
keyboard = KeyboardFactory(user=self.user, is_primary=False)
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': keyboard.id}),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=200)
self.assertEqual(response.status_code, 200)
def test_put_anothers_keyboard_logged_in(self):
user = UserFactory()
keyboard = KeyboardFactory(user=self.user, is_primary=False)
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': keyboard.id}),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
user=user.username,
status=404)
self.assertEqual(response.status_code, 404)
def test_put_nonexistent_keyboard_logged_in(self):
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': 99}),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=404)
self.assertEqual(response.status_code, 404)
def test_put_keyboard_no_mappings(self):
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': self.keyboard.id}),
json.dumps({k: v for k, v in
MOCK_KEYBOARD.iteritems() if k != 'mappings'}),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=400)
self.assertEqual('{"mappings":["This field is required."]}', response.body)
self.assertEqual(response.status_code, 400)
def test_put_keyboard_missing_fields(self):
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': self.keyboard.id}),
json.dumps({k: v for k, v in
MOCK_KEYBOARD.iteritems() if k != 'label'}),
headers={'Content-Type': 'application/json'},
user=self.user.username,
status=400)
self.assertEqual(response.body, '{"label":["This field is required."]}')
self.assertEqual(response.status_code, 400)
def test_put_keyboard_logged_out(self):
response = self.app.put(reverse('keyboard-detail', kwargs={'keyboard_id': self.keyboard.id}),
json.dumps(MOCK_KEYBOARD),
headers={'Content-Type': 'application/json'},
status=403)
self.assertEqual(response.status_code, 403)
def test_delete_keyboard_logged_out(self):
response = self.app.delete(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
status=403)
self.assertEqual(response.status_code, 403)
def test_delete_keyboard_not_exists(self):
response = self.app.delete(reverse('keyboard-detail',
kwargs={'keyboard_id': 99}),
user=self.user.username,
status=404)
self.assertEqual(response.status_code, 404)
def test_delete_anothers_keyboard(self):
user = UserFactory()
response = self.app.delete(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
user=user.username,
status=404)
self.assertEqual(response.status_code, 404)
def test_delete_keyboard_exists(self):
response = self.app.delete(reverse('keyboard-detail',
kwargs={'keyboard_id': self.keyboard.id}),
user=self.user.username,
status=204)
self.assertEqual(response.status_code, 204)
with self.assertRaises(Keyboard.DoesNotExist):
Keyboard.objects.get(id=self.keyboard.id)
|
{
"content_hash": "727d082f70793b235ef99cb4fa0601d0",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 101,
"avg_line_length": 46.582375478927204,
"alnum_prop": 0.5688435597960191,
"repo_name": "haematologic/cellcounter",
"id": "d0a5371e4c5057244e9189138760ec2283a0d14e",
"size": "12158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cellcounter/cc_kapi/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8020"
},
{
"name": "HTML",
"bytes": "39444"
},
{
"name": "JavaScript",
"bytes": "41956"
},
{
"name": "Python",
"bytes": "104289"
}
],
"symlink_target": ""
}
|
from futurist import periodics
import os
import threading
import time
from oslo_log import log
import ceilometer
from ceilometer import monasca_client as mon_client
from ceilometer import publisher
from ceilometer.publisher.monasca_data_filter import MonascaDataFilter
from monascaclient import exc
import traceback
# Have to use constants rather than conf to satisfy @periodics.periodic
BATCH_POLLING_INTERVAL = 5
BATCH_RETRY_INTERVAL = 60
LOG = log.getLogger(__name__)
class MonascaPublisher(publisher.ConfigPublisherBase):
"""Publisher to publish samples to monasca using monasca-client.
Example URL to place in pipeline.yaml:
- monasca://http://192.168.10.4:8070/v2.0
"""
def __init__(self, conf, parsed_url):
super(MonascaPublisher, self).__init__(conf, parsed_url)
# list to hold metrics to be published in batch (behaves like queue)
self.metric_queue = []
self.time_of_last_batch_run = time.time()
self.mon_client = mon_client.Client(self.conf, parsed_url)
self.mon_filter = MonascaDataFilter(self.conf)
# add flush_batch function to periodic callables
periodic_callables = [
# The function to run + any automatically provided
# positional and keyword arguments to provide to it
            # every time it is activated.
(self.flush_batch, (), {}),
]
if self.conf.monasca.retry_on_failure:
# list to hold metrics to be re-tried (behaves like queue)
self.retry_queue = []
# list to store retry attempts for metrics in retry_queue
self.retry_counter = []
# add retry_batch function to periodic callables
periodic_callables.append((self.retry_batch, (), {}))
if self.conf.monasca.archive_on_failure:
archive_path = self.conf.monasca.archive_path
if not os.path.exists(archive_path):
archive_path = self.conf.find_file(archive_path)
self.archive_handler = publisher.get_publisher(
self.conf,
'file://' +
str(archive_path),
'ceilometer.sample.publisher')
# start periodic worker
self.periodic_worker = periodics.PeriodicWorker(periodic_callables)
self.periodic_thread = threading.Thread(
target=self.periodic_worker.start)
self.periodic_thread.daemon = True
self.periodic_thread.start()
def _publish_handler(self, func, metrics, batch=False):
"""Handles publishing and exceptions that arise."""
try:
metric_count = len(metrics)
if batch:
func(**{'jsonbody': metrics})
else:
func(**metrics[0])
LOG.info('Successfully published %d metric(s)' % metric_count)
except mon_client.MonascaServiceException:
# Assuming atomicity of create or failure - meaning
# either all succeed or all fail in a batch
LOG.error('Metric create failed for %(count)d metric(s) with'
' name(s) %(names)s ' %
({'count': len(metrics),
'names': ','.join([metric['name']
for metric in metrics])}))
if self.conf.monasca.retry_on_failure:
# retry payload in case of internal server error(500),
# service unavailable error(503),bad gateway (502) or
# Communication Error
# append failed metrics to retry_queue
LOG.debug('Adding metrics to retry queue.')
self.retry_queue.extend(metrics)
                # initialize the retry attempt counter for each failed
                # metric in retry_counter
                self.retry_counter.extend([0] * metric_count)
else:
if hasattr(self, 'archive_handler'):
self.archive_handler.publish_samples(None, metrics)
except Exception:
LOG.info(traceback.format_exc())
if hasattr(self, 'archive_handler'):
self.archive_handler.publish_samples(None, metrics)
def publish_samples(self, samples):
"""Main method called to publish samples."""
for sample in samples:
metric = self.mon_filter.process_sample_for_monasca(sample)
# In batch mode, push metric to queue,
# else publish the metric
if self.conf.monasca.batch_mode:
LOG.debug('Adding metric to queue.')
self.metric_queue.append(metric)
else:
LOG.info('Publishing metric with name %(name)s and'
' timestamp %(ts)s to endpoint.' %
({'name': metric['name'],
'ts': metric['timestamp']}))
self._publish_handler(self.mon_client.metrics_create, [metric])
def is_batch_ready(self):
"""Method to check if batch is ready to trigger."""
previous_time = self.time_of_last_batch_run
current_time = time.time()
elapsed_time = current_time - previous_time
        if (elapsed_time >= self.conf.monasca.batch_timeout
                and len(self.metric_queue) > 0):
LOG.info('Batch timeout exceeded, triggering batch publish.')
return True
else:
if len(self.metric_queue) >= self.conf.monasca.batch_count:
LOG.info('Batch queue full, triggering batch publish.')
return True
else:
return False
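    # Worked example of the batching rule above (parameter values are
    # illustrative, drawn from the monasca conf group used in this module):
    # with batch_timeout = 30 and batch_count = 1000, a flush is triggered
    # either roughly 30 s after the previous batch run (provided the queue is
    # non-empty) or as soon as 1000 metrics are queued, whichever happens
    # first, checked every BATCH_POLLING_INTERVAL seconds by flush_batch().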
@periodics.periodic(BATCH_POLLING_INTERVAL)
def flush_batch(self):
"""Method to flush the queued metrics."""
# print "flush batch... %s" % str(time.time())
if self.is_batch_ready():
# publish all metrics in queue at this point
batch_count = len(self.metric_queue)
LOG.info("batch is ready: batch_count %s" % str(batch_count))
self._publish_handler(self.mon_client.metrics_create,
self.metric_queue[:batch_count],
batch=True)
self.time_of_last_batch_run = time.time()
# slice queue to remove metrics that
# published with success or failed and got queued on
# retry queue
self.metric_queue = self.metric_queue[batch_count:]
def is_retry_ready(self):
"""Method to check if retry batch is ready to trigger."""
if len(self.retry_queue) > 0:
LOG.info('Retry queue has items, triggering retry.')
return True
else:
return False
@periodics.periodic(BATCH_RETRY_INTERVAL)
def retry_batch(self):
"""Method to retry the failed metrics."""
# print "retry batch...%s" % str(time.time())
if self.is_retry_ready():
retry_count = len(self.retry_queue)
# Iterate over the retry_queue to eliminate
# metrics that have maxed out their retry attempts
            # walk backwards so deletions do not shift indices that are
            # still to be visited
            for ctr in range(retry_count - 1, -1, -1):
if self.retry_counter[ctr] > self.conf.\
monasca.batch_max_retries:
if hasattr(self, 'archive_handler'):
self.archive_handler.publish_samples(
None,
[self.retry_queue[ctr]])
                    LOG.info('Removing metric %s from retry queue.'
                             ' Metric maxed out its retry attempts' %
                             self.retry_queue[ctr]['name'])
del self.retry_queue[ctr]
del self.retry_counter[ctr]
# Iterate over the retry_queue to retry the
# publish for each metric.
# If an exception occurs, the retry count for
# the failed metric is incremented.
# If the retry succeeds, remove the metric and
# the retry count from the retry_queue and retry_counter resp.
ctr = 0
while ctr < len(self.retry_queue):
try:
LOG.info('Retrying metric publish from retry queue.')
self.mon_client.metrics_create(**self.retry_queue[ctr])
# remove from retry queue if publish was success
LOG.info('Retrying metric %s successful,'
' removing metric from retry queue.' %
self.retry_queue[ctr]['name'])
del self.retry_queue[ctr]
del self.retry_counter[ctr]
except exc.ClientException:
LOG.error('Exception encountered in retry. '
'Batch will be retried in next attempt.')
# if retry failed, increment the retry counter
self.retry_counter[ctr] += 1
ctr += 1
def flush_to_file(self):
# TODO(persist maxed-out metrics to file)
pass
def publish_events(self, events):
"""Send an event message for publishing
:param events: events from pipeline after transformation
"""
raise ceilometer.NotImplementedError
|
{
"content_hash": "bd7e7ef6e7c9ffeec4b8df3f7a7f4589",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 79,
"avg_line_length": 40.25321888412017,
"alnum_prop": 0.5614671073675231,
"repo_name": "openstack/ceilometer",
"id": "9d9839f1037c6e8e5f6e553542f044d680a0799b",
"size": "9992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/publisher/monasca.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1333367"
},
{
"name": "Shell",
"bytes": "18703"
}
],
"symlink_target": ""
}
|
from cliff.command import Command
import call_server as server
class AppDelete(Command):
def get_parser(self, prog_name):
parser = super(AppDelete, self).get_parser(prog_name)
parser.add_argument('app_name')
return parser
def take_action(self, parsed_args):
app_name = parsed_args.app_name
server.TakeAction().delete_app(app_name)
|
{
"content_hash": "7221849f9733f268b2cd2d5ee1c83ede",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 21.72222222222222,
"alnum_prop": 0.659846547314578,
"repo_name": "cloud-ark/cloudark",
"id": "389ae0774d98442c29047583aebbcf23681c336a",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/fmcmds/app_delete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "328140"
},
{
"name": "Shell",
"bytes": "8859"
}
],
"symlink_target": ""
}
|
'''
Usage:
python3 backlog.py issues.csv
Reference: http://www.backlog.jp/api/
'''
BACKLOG_USER = ''
BACKLOG_PASS = ''
BACKLOG_HOST = '' # Ex: abc.backlog.jp
DEBUG = False
import sys
import csv
from xmlrpc.client import ServerProxy
class BacklogAPI:
client = None
def __init__(self, user, passwd, host):
url = 'https://{0}:{1}@{2}/XML-RPC'.format(user, passwd, host)
self.client = ServerProxy(url, verbose=DEBUG)
def import_issue(self, csv_path):
with open(csv_path) as f:
reader = csv.reader(f)
for row in reader:
self._add_issue(row)
print(row)
def _add_issue(self, detail):
(no,project,summary,description,parent_id,due_date,assigner,priority) = detail
arg = dict()
arg['projectId'] = self.get_projectid(project)
arg['summary'] = summary
arg['description'] = description
if (parent_id):
arg['parent_issue_id'] = parent_id
if (due_date):
arg['due_date'] = due_date
if (assigner):
arg['assignerId'] = self.get_userid(assigner)
if (priority):
arg['priorityId'] = priority
self.call('backlog.createIssue', arg)
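    # Illustrative CSV row for import_issue()/_add_issue() above; the column
    # order is fixed but the values are made up:
    #
    #   1,MYPROJ,Fix login bug,Steps to reproduce...,12345,20140131,jsmith,3
    #
    # i.e. no, project key, summary, description, parent issue id, due date,
    # assigner login id and priority id, with the optional fields left empty
    # when not needed.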
def get_userid(self, name):
''' name should be the login id'''
        # XML-RPC structs come back as plain dicts
        response = self.call('backlog.getUser', name)
        return response['id']
def get_projectid(self, name):
''' project name is slug of project '''
        response = self.call('backlog.getProject', name)
        return response['id']
def call(self, key, *args):
func = getattr(self.client, key)
return func(*args)
if __name__ == "__main__":
csv_path = None
try:
csv_path = sys.argv[1]
        api = BacklogAPI(BACKLOG_USER, BACKLOG_PASS, BACKLOG_HOST)
api.import_issue(csv_path)
except IndexError:
print("Please specify issues in CSV format")
sys.exit(2)
|
{
"content_hash": "621eb5eb3558071adca319e0d0a8a950",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 86,
"avg_line_length": 28.225352112676056,
"alnum_prop": 0.5598802395209581,
"repo_name": "manhg/backlog-import-csv",
"id": "37dac0e3533c6f49395cdeb5d89b1206bc32eeb6",
"size": "2004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backlog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2004"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
from article.models import Article
admin.site.register(Article)
|
{
"content_hash": "d6b2f2d5aceb98a5cc3e8cd4f5f61a72",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 34,
"avg_line_length": 16.25,
"alnum_prop": 0.8,
"repo_name": "meranamvarun/django_practice",
"id": "31e58aaae96cb0766dde9e1a4a2d161229bc5cc1",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "HTML",
"bytes": "3858"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "11465"
}
],
"symlink_target": ""
}
|
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
import browser
import time
class PageObject:
XPATH_RADIO = '//div[@class="custom-tumbler" ' \
'and input[@type="radio" and @name="{}" and @value="{}"]]'
XPATH_CHECKBOX = \
'//div[@class="custom-tumbler" ' \
'and input[@type="checkbox" and @name="{}"]]'
def __init__(self, parent=None):
self.parent = parent or browser.driver
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@staticmethod
def wait_until_moving(element, timeout=10):
class Move:
def __init__(self, elem):
self.element = elem
self.location = elem.location
def __call__(self, *args, **kwargs):
loc = element.location
res = self.location['x'] == loc['x'] \
and self.location['y'] == loc['y']
self.location = loc
return res
wait = WebDriverWait(browser.driver, timeout)
wait.until(Move(element))
@staticmethod
def wait_until_exists(element, timeout=10):
wait = WebDriverWait(browser.driver, timeout)
try:
wait.until(lambda driver: not element.is_displayed())
except StaleElementReferenceException:
pass
@staticmethod
def wait_element(page_object, attribute, timeout=10):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except NoSuchElementException:
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def long_wait_element(page_object, attribute, timeout=40):
class El:
def __init__(self, page_object, attribute):
self.page_object = page_object
self.attribute = attribute
def __call__(self, *args, **kwargs):
try:
getattr(self.page_object, attribute)
return True
except (NoSuchElementException,
StaleElementReferenceException):
return False
wait = WebDriverWait(browser.driver, timeout)
wait.until(El(page_object, attribute))
@staticmethod
def click_element(page_object, *args):
# get the list of attributes passed to the method
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
"""1, 3, 4 are the number of passed to the method attributes
1 means that only class name and one property
were passed to the method
3 means that class name, two properties and index
of the element were passed to the method
4 means that class name, three properties and index
of the element were passed to the method
"""
if len(attributes) == 1:
getattr(page_object, attributes[0]).click()
elif len(attributes) == 3:
getattr(getattr(page_object, attributes[0])
[attributes[2]], attributes[1]).click()
elif len(attributes) == 4:
getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).click()
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
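    # Illustrative calls for click_element()/find_element() above; the page
    # object and attribute names are hypothetical:
    #
    #   PageObject.click_element(settings_page, 'save_button')
    #   PageObject.click_element(nodes_page, 'nodes', 'checkbox', 0)
    #
    # i.e. either a single attribute, or a collection attribute plus a
    # sub-attribute and the index of the element to act on.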
@staticmethod
def find_element(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0])
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1])
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2])
break
except (StaleElementReferenceException, NoSuchElementException,
WebDriverException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
@staticmethod
def get_lower_text(page_object, *args):
attributes = [attribute for attribute in args]
attempts = 0
while attempts < 5:
try:
if len(attributes) == 1:
return getattr(page_object, attributes[0]).text.lower()
elif len(attributes) == 3:
return getattr(getattr(page_object,
attributes[0])[attributes[2]],
attributes[1]).text.lower()
elif len(attributes) == 4:
return getattr(getattr(getattr(page_object,
attributes[0])[attributes[3]],
attributes[1]), attributes[2]).text.lower()
break
except (StaleElementReferenceException, NoSuchElementException):
time.sleep(0.5)
attempts += 1
class Popup(PageObject):
def __init__(self):
element = browser.driver.find_element_by_css_selector('div.modal')
PageObject.__init__(self, element)
time.sleep(0.5)
# PageObject.wait_until_moving(self.parent)
def wait_until_exists(self):
try:
PageObject.wait_until_exists(
browser.driver.
find_element_by_css_selector('div.modal-backdrop'))
except NoSuchElementException:
pass
    # Check that an element is displayed
    @staticmethod
    def wait_until_element_will_be_displayed(element):
        try:
            wait = WebDriverWait(browser.driver, 3)
            wait.until(lambda driver: element.is_displayed())
        except NoSuchElementException:
            pass
@property
def close_cross(self):
return self.parent.find_element_by_css_selector('.close')
@property
def header(self):
return self.parent.find_element_by_css_selector('.modal-header > h3')
class ConfirmPopup(Popup):
TEXT = 'Settings were modified but not saved'
@property
def stay_on_page(self):
return self.parent.find_element_by_css_selector('.btn-return')
@property
def leave_page(self):
return self.parent.\
find_element_by_css_selector('.proceed-btn')
|
{
"content_hash": "52c2a9f713ca2424b650ef27af227505",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 78,
"avg_line_length": 36.56140350877193,
"alnum_prop": 0.5296305182341651,
"repo_name": "dancn/fuel-main-dev",
"id": "7c0a372711f7d76c5cfb90754acb66c2a8197f16",
"size": "8336",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fuelweb_ui_test/pageobjects/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2180"
},
{
"name": "Makefile",
"bytes": "96551"
},
{
"name": "Pascal",
"bytes": "54"
},
{
"name": "Perl",
"bytes": "197"
},
{
"name": "Puppet",
"bytes": "40130"
},
{
"name": "Python",
"bytes": "836337"
},
{
"name": "Ruby",
"bytes": "43759"
},
{
"name": "Shell",
"bytes": "123485"
}
],
"symlink_target": ""
}
|
def make_parent(parent: str) -> str:
# Sample function parameter parent in list_annotations_sample
parent = parent
return parent
|
{
"content_hash": "07b1645e7c4b91ff4da15f690ac5fde7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 65,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.7062937062937062,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "cd8f117a8544516c80599e5f34b134c698317d20",
"size": "720",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".sample_configs/param_handlers/list_annotations_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
from pyface.resource_manager import *
|
{
"content_hash": "eddf27c77cda1c31fb28198b494bf620",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.8157894736842105,
"repo_name": "enthought/etsproxy",
"id": "ef5048f0b94a547aa4b352594137f287fa5f1f74",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/resource_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
"""Landlab component for overland flow using a local implicit solution to the
kinematic-wave approximation.
Created on Fri May 27 14:26:13 2016
@author: gtucker
"""
import numpy as np
from scipy.optimize import newton
from landlab import Component
from landlab.components import FlowAccumulator
def water_fn(x, a, b, c, d, e):
r"""Evaluates the solution to the water-depth equation.
Called by scipy.newton() to find solution for :math:`x`
using Newton's method.
Parameters
----------
x : float
Water depth at new time step.
a : float
"alpha" parameter (see below)
b : float
Weighting factor on new versus old time step. :math:`b=1` means purely
implicit solution with all weight on :math:`H` at new time
step. :math:`b=0` (not recommended) would mean purely explicit.
c : float
Water depth at old time step (time step :math:`t` instead
of :math:`t+1`)
d : float
Depth-discharge exponent; normally either 5/3 (Manning) or 3/2 (Chezy)
e : float
Water inflow volume per unit cell area in one time step.
This equation represents the implicit solution for water depth
:math:`H` at the next time step. In the code below, it is
formulated in a generic way. Written using more familiar
terminology, the equation is:
.. math::
H - H_0 + \alpha ( w H + (w-1) H_0)^d - \Delta t (R + Q_{in} / A)
.. math::
\alpha = \frac{\Delta t \sum S^{1/2}}{C_f A}
where :math:`H` is water depth at the given node at the new
time step, :math:`H_0` is water depth at the prior time step,
:math:`w` is a weighting factor, :math:`d` is the depth-discharge
exponent (2/3 or 1/2), :math:`\Delta t` is time-step duration,
:math:`R` is local runoff rate, :math:`Q_{in}` is inflow
discharge, :math:`A` is cell area, :math:`C_f` is a
dimensional roughness coefficient, and :math:`\sum S^{1/2}`
represents the sum of square-root-of-downhill-gradient over
all outgoing (downhill) links.
"""
return x - c + a * (b * x + (b - 1.0) * c) ** d - e
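# Worked example of the root find performed in run_one_step() below (the
# numbers are illustrative): for alpha = 0.5, fully implicit weighting
# w = 1, an old depth H0 = 0.1 m, the Manning exponent d = 5/3 and an
# inflow term e = 0.01 m, scipy's Newton solver is seeded with the old depth:
#
#   from scipy.optimize import newton
#   h_new = newton(water_fn, 0.1, args=(0.5, 1.0, 0.1, 5.0 / 3.0, 0.01))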
class KinwaveImplicitOverlandFlow(Component):
r"""Calculate shallow water flow over topography.
Landlab component that implements a two-dimensional kinematic wave model.
This is a form of the 2D shallow-water equations in which energy slope is
assumed to equal bed slope. The solution method is locally implicit, and
works as follows. At each time step, we iterate from upstream to downstream
over the topography. Because we are working downstream, we can assume that
we know the total water inflow to a given cell. We solve the following mass
conservation equation at each cell:
.. math::
(H^{t+1} - H^t)/\Delta t = Q_{in}/A - Q_{out}/A + R
where :math:`H` is water depth, :math:`t` indicates time step
number, :math:`\Delta t` is time step duration, :math:`Q_{in}` is
total inflow discharge, :math:`Q_{out}` is total outflow
discharge, :math:`A` is cell area, and :math:`R` is local
runoff rate (precipitation minus infiltration; could be
negative if runon infiltration is occurring).
The specific outflow discharge leaving a cell along one of its faces is:
.. math::
q = (1/C_r) H^\alpha S^{1/2}
where :math:`C_r` is a roughness coefficient (such as
Manning's n), :math:`\alpha` is an exponent equal to :math:`5/3`
for the Manning equation and :math:`3/2` for the Chezy family,
and :math:`S` is the downhill-positive gradient of the link
that crosses this particular face. Outflow discharge is zero
for links that are flat or "uphill" from the given node.
Total discharge out of a cell is then the sum of (specific
discharge x face width) over all outflow faces
.. math::
Q_{out} = \sum_{i=1}^N (1/C_r) H^\alpha S_i^{1/2} W_i
where :math:`N` is the number of outflow faces (i.e., faces
where the ground slopes downhill away from the cell's node),
and :math:`W_i` is the width of face :math:`i`.
We use the depth at the cell's node, so this simplifies to:
.. math::
Q_{out} = (1/C_r) H'^\alpha \sum_{i=1}^N S_i^{1/2} W_i
We define :math:`H` in the above as a weighted sum of
the "old" (time step :math:`t`) and "new" (time step :math:`t+1`)
depth values:
.. math::
H' = w H^{t+1} + (1-w) H^t
If :math:`w=1`, the method is fully implicit. If :math:`w=0`,
it is a simple forward explicit method.
When we combine these equations, we have an equation that includes the
unknown :math:`H^{t+1}` and a bunch of terms that are known.
If :math:`w\ne 0`, it is a nonlinear equation in :math:`H^{t+1}`,
and must be solved iteratively. We do this using a root-finding
method in the scipy.optimize library.
Examples
--------
>>> from landlab import RasterModelGrid
>>> rg = RasterModelGrid((4, 5), xy_spacing=10.0)
>>> z = rg.add_zeros("topographic__elevation", at="node")
>>> kw = KinwaveImplicitOverlandFlow(rg)
>>> round(kw.runoff_rate * 1.0e7, 2)
2.78
>>> kw.vel_coef # default value
100.0
>>> rg.at_node['surface_water__depth'][6:9]
array([ 0., 0., 0.])
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
None Listed
"""
_name = "KinwaveImplicitOverlandFlow"
_unit_agnostic = False
_info = {
"surface_water__depth": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Depth of water on the surface",
},
"surface_water_inflow__discharge": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m3/s",
"mapping": "node",
"doc": "water volume inflow rate to the cell around each node",
},
"topographic__elevation": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
},
"topographic__gradient": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m/m",
"mapping": "link",
"doc": "Gradient of the ground surface",
},
}
def __init__(
self,
grid,
runoff_rate=1.0,
roughness=0.01,
changing_topo=False,
depth_exp=1.5,
weight=1.0,
):
"""Initialize the KinwaveImplicitOverlandFlow.
Parameters
----------
grid : ModelGrid
Landlab ModelGrid object
runoff_rate : float, optional (defaults to 1 mm/hr)
Precipitation rate, mm/hr. The value provided is divided by
3600000.0.
roughness : float, defaults to 0.01
Manning roughness coefficient; units depend on depth_exp.
changing_topo : boolean, optional (defaults to False)
Flag indicating whether topography changes between time steps
depth_exp : float (defaults to 1.5)
Exponent on water depth in velocity equation (3/2 for Darcy/Chezy,
5/3 for Manning)
weight : float (defaults to 1.0)
Weighting on depth at new time step versus old time step (1 = all
implicit; 0 = explicit)
"""
super().__init__(grid)
# Store parameters and do unit conversion
self._runoff_rate = runoff_rate / 3600000.0 # convert to m/s
self._vel_coef = 1.0 / roughness # do division now to save time
self._changing_topo = changing_topo
self._depth_exp = depth_exp
self._weight = weight
# Get elevation field
self._elev = grid.at_node["topographic__elevation"]
# Create fields...
self.initialize_output_fields()
self._depth = grid.at_node["surface_water__depth"]
self._slope = grid.at_link["topographic__gradient"]
self._disch_in = grid.at_node["surface_water_inflow__discharge"]
# This array holds, for each node, the sum of sqrt(slope) x face width
# for each link/face.
self._grad_width_sum = grid.zeros("node")
# This array holds the prefactor in the algebraic equation that we
# will find a solution for.
self._alpha = grid.zeros("node")
# Instantiate flow router
self._flow_accum = FlowAccumulator(
grid,
"topographic__elevation",
flow_director="MFD",
partition_method="square_root_of_slope",
)
# Flag to let us know whether this is our first iteration
self._first_iteration = True
@property
def runoff_rate(self):
"""Runoff rate.
Parameters
----------
runoff_rate : float, optional (defaults to 1 mm/hr)
            Precipitation rate, mm/hr. The value provided is divided by
3600000.0.
Returns
-------
The current value of the runoff rate.
"""
return self._runoff_rate
@runoff_rate.setter
def runoff_rate(self, new_rate):
assert new_rate > 0
self._runoff_rate = new_rate / 3600000.0 # convert to m/s
@property
def vel_coef(self):
"""Velocity coefficient."""
return self._vel_coef
@property
def depth(self):
"""The depth of water at each node."""
return self._depth
def run_one_step(self, dt):
"""Calculate water flow for a time period `dt`."""
# If it's our first iteration, or if the topography may be changing,
# do flow routing and calculate square root of slopes at links
if self._changing_topo or self._first_iteration:
# Calculate the ground-surface slope
self._slope[self._grid.active_links] = self._grid.calc_grad_at_link(
self._elev
)[self._grid.active_links]
# Take square root of slope magnitude for use in velocity eqn
self._sqrt_slope = np.sqrt(np.abs(self._slope))
# Re-route flow, which gives us the downstream-to-upstream
# ordering
self._flow_accum.run_one_step()
self._nodes_ordered = self._grid.at_node["flow__upstream_node_order"]
self._flow_lnks = self._grid.at_node["flow__link_to_receiver_node"]
# (Re)calculate, for each node, sum of sqrt(gradient) x width
self._grad_width_sum[:] = 0.0
for i in range(self._flow_lnks.shape[1]):
self._grad_width_sum[:] += (
self._sqrt_slope[self._flow_lnks[:, i]]
* self._grid.length_of_face[
self._grid.face_at_link[self._flow_lnks[:, i]]
]
)
# Calculate values of alpha, which is defined as
#
# $\alpha = \frac{\Sigma W S^{1/2} \Delta t}{A C_r}$
cores = self._grid.core_nodes
self._alpha[cores] = (
self._vel_coef
* self._grad_width_sum[cores]
* dt
/ (self._grid.area_of_cell[self._grid.cell_at_node[cores]])
)
# Zero out inflow discharge
self._disch_in[:] = 0.0
# Upstream-to-downstream loop
for i in range(len(self._nodes_ordered) - 1, -1, -1):
n = self._nodes_ordered[i]
if self._grid.status_at_node[n] == 0:
# Solve for new water depth
aa = self._alpha[n]
cc = self._depth[n]
ee = (dt * self._runoff_rate) + (
dt
* self._disch_in[n]
/ self._grid.area_of_cell[self._grid.cell_at_node[n]]
)
self._depth[n] = newton(
water_fn,
self._depth[n],
args=(aa, self._weight, cc, self._depth_exp, ee),
)
# Calc outflow
Heff = self._weight * self._depth[n] + (1.0 - self._weight) * cc
outflow = (
self._vel_coef * (Heff**self._depth_exp) * self._grad_width_sum[n]
) # this is manning/chezy/darcy
# Send flow downstream. Here we take total inflow discharge
# and partition it among the node's neighbors. For this, we use
# the flow director's "proportions" array, which contains, for
# each node, the proportion of flow that heads out toward each
# of its N neighbors. The proportion is zero if the neighbor is
# uphill; otherwise, it is S^1/2 / sum(S^1/2). If for example
# we have a raster grid, there will be four neighbors and four
# proportions, some of which may be zero and some between 0 and
# 1.
self._disch_in[self._grid.adjacent_nodes_at_node[n]] += (
outflow * self._flow_accum.flow_director._proportions[n]
)
# TODO: the above is enough to implement the solution for flow
# depth, but it does not provide any information about flow
# velocity or discharge on links. This could be added as an
# optional method, perhaps done just before output.
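# A minimal driver sketch (illustrative; the grid size, runoff rate and time
# step below are made up). It extends the doctest in the class docstring by
# actually stepping the component forward:
#
#   from landlab import RasterModelGrid
#
#   grid = RasterModelGrid((10, 10), xy_spacing=10.0)
#   grid.add_zeros("topographic__elevation", at="node")
#   kw = KinwaveImplicitOverlandFlow(grid, runoff_rate=50.0, roughness=0.1)
#   for _ in range(60):          # one hour in 60 s steps
#       kw.run_one_step(60.0)
#   depth = grid.at_node["surface_water__depth"]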
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "fe753840eeba95e13174f880f39068d8",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 86,
"avg_line_length": 35.11794871794872,
"alnum_prop": 0.5686331775700935,
"repo_name": "landlab/landlab",
"id": "632bf0fa0f1b72eb38a55ede6472219e460ae703",
"size": "13720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/overland_flow/generate_overland_flow_implicit_kinwave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "762"
},
{
"name": "Cython",
"bytes": "265735"
},
{
"name": "Gherkin",
"bytes": "1601"
},
{
"name": "Jupyter Notebook",
"bytes": "1373117"
},
{
"name": "Makefile",
"bytes": "2250"
},
{
"name": "Python",
"bytes": "4497175"
},
{
"name": "Roff",
"bytes": "445"
},
{
"name": "Shell",
"bytes": "1073"
},
{
"name": "TeX",
"bytes": "42252"
}
],
"symlink_target": ""
}
|
import pyqtgraph as pg
import re
from pathlib import Path
from cued_datalogger.analysis import analysis_window
class Workspace(object):
"""
The ``Workspace`` class stores the workspace attributes and has methods for
saving, loading, configuring, and displaying the workspace settings.
Workspaces are designed so that specific configurations of the DataLogger
can be created, eg. for undergraduate labs, with different features
enabled or disabled, and stored in a ``.wsp`` file that can be read using
the :class:`Workspace` class. In the DataLogger, a ``CurrentWorkspace``
instance is normally initiated that will store the current settings and all
the workspace functionality will be accessed through the
``CurrentWorkspace``.
Attributes
----------
name : str
A human-readable name for this workspace, eg. ``"Lab 4C6"``
path : str
The path to this workspace's directory. Addons will be loaded from the
directory ``path/addons/``, and files will be saved to ``path`` (not
implemented yet). Default value is ``"./"``.
add_ons_enabled : bool
        Flag that sets whether addons are enabled (not implemented yet -
currently has no effect). Default value is ``True``
pyqtgraph_inverted : bool
Flag that sets whether pyqtgraph uses a white background and black
lines (``False``), or black background and white lines (``True``).
Default value is ``False``.
    default_pen : QPen
The default colour of the pen, set by :attr:`pyqtgraph_inverted`.
Cannot be set manually.
pyqtgraph_antialias : bool
Flag that sets whether pyqtgraph uses antialiasing for smoother lines.
Default value is ``True``.
"""
def __init__(self):
# Set default values:
self.name = "Default Workspace"
self.path = str(Path.home())
self.add_ons_enabled = 1
self.pyqtgraph_inverted = 0
self.pyqtgraph_antialias = 1
self.default_pen = None
self.parent = None
self.configure()
def settings(self):
"""A convenience method to access this workspace's configuration"""
return vars(self)
def save(self, destination):
"""Save this workspace to *destination* (of the form
``"/path/to/workspace.wsp"``)."""
print("Saving current workspace to {} ...".format(destination))
print("\t Settings found:")
# Open the destination file
with open(destination, 'w') as wsp_file:
for name, value in vars(self).items():
print("\t {}: {}".format(name, value))
# Write the settings to the file
# Ensure that the strings are written in quotes
if isinstance(value, str):
wsp_file.write("{}='{}'\n".format(name, value))
else:
wsp_file.write("{}={}\n".format(name, value))
print("Done.")
def load(self, workspace):
"""Load the settings found in the .wsp file given by *workspace*`
(of the form ``"/path/to/workspace.wsp"``)."""
print("Loading workspace {} ...".format(workspace))
print("\t Settings found:")
# Open as a read-only file object
with open(workspace, 'r') as wsp_file:
for line in wsp_file:
# Create a regex to match the correct form:
# variable_name=(0 or 1) or variable_name="string/or/path.py"
                correct_form = re.compile(r"(\w*)=([01]|'[\w\s./]*'\n)")
line_match = re.match(correct_form, line)
# If this line matches
if line_match:
# Split about the equals sign
variable_name, variable_value = line.split('=')
# Sort out types
# Strings will all start with '
if variable_value[0] == "'":
# Split the string with ' as delimiter
# giving ['', "string", ''] and extract the second
# argument
variable_value = variable_value.split("'")[1]
else:
# Otherwise, treat it as an int
variable_value = int(variable_value)
# Check if these are attributes of the Workspace
if hasattr(self, variable_name):
# If so, set the attribute
setattr(self, variable_name, variable_value)
print("\t {}: {}".format(variable_name, variable_value))
print("Done.")
self.configure()
def configure(self):
"""Set the global configuration of the DataLogger
to the settings in this workspace."""
print("Configuring workspace...")
# # Set other settings
# <code>
# Set window settings
if self.parent is not None:
if isinstance(self.parent, analysis_window.AnalysisWindow):
if not self.add_ons_enabled:
self.parent.global_toolbox.removeTab("Addon Manager")
# # Set PyQtGraph settings
if self.default_pen is None:
if self.pyqtgraph_inverted:
self.default_pen = pg.mkPen()
else:
self.default_pen = pg.mkPen('k')
if not self.pyqtgraph_inverted:
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
else:
pg.setConfigOption('background', 'k')
pg.setConfigOption('foreground', 'w')
if self.pyqtgraph_antialias:
pg.setConfigOption('antialias', True)
else:
pg.setConfigOption('antialias', False)
print("Done.")
def set_parent_window(self, parent_window):
self.parent = parent_window
self.parent.CurrentWorkspace = self
self.configure()
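# Illustrative example of the ``.wsp`` format parsed by ``load`` above
# (the path and values are made up):
#
#   name='Lab 4C6'
#   path='/home/student/lab4c6'
#   add_ons_enabled=0
#   pyqtgraph_inverted=1
#   pyqtgraph_antialias=1
#
# which would typically be applied with:
#
#   workspace = Workspace()
#   workspace.load("/path/to/lab4c6.wsp")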
|
{
"content_hash": "e69dfd7e5d2f64ee171bca8fd69dde1e",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 80,
"avg_line_length": 38.13291139240506,
"alnum_prop": 0.5689626556016597,
"repo_name": "torebutlin/cued_datalogger",
"id": "32579be9d648c3cc001c31f4c8f915e1b6c0f4f6",
"size": "6025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cued_datalogger/api/workspace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "377380"
},
{
"name": "Roff",
"bytes": "23"
}
],
"symlink_target": ""
}
|
from geoq.core.models import Setting
import json
def app_settings(request):
"""Global values to pass to templates"""
settings_dict = dict()
settings = dict()
for obj in Setting.objects.all():
settings[obj.name] = obj.value
settings_dict['settings'] = json.dumps(settings)
return settings_dict
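# To make ``settings`` available in every template, a context processor like
# this is registered in the project settings; a hedged sketch (the exact
# setting name depends on the Django version in use):
#
#   # Django >= 1.8 style
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'OPTIONS': {
#           'context_processors': [
#               # ... the defaults ...
#               'geoq.core.contextprocessors.app_settings',
#           ],
#       },
#   }]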
|
{
"content_hash": "4bf76edcb8ca2b182faa5472364cdfa5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 27.25,
"alnum_prop": 0.6788990825688074,
"repo_name": "Pkthunder/geoq",
"id": "0f10d0466582655b48ca55ea1843236e7d5f8c4f",
"size": "534",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "geoq/core/contextprocessors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71402"
},
{
"name": "HTML",
"bytes": "203736"
},
{
"name": "JavaScript",
"bytes": "1424580"
},
{
"name": "Python",
"bytes": "727105"
}
],
"symlink_target": ""
}
|
import os
import warnings
import functools
import imath
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
class PathWidget( GafferUI.TextWidget ) :
def __init__( self, path, **kw ) :
GafferUI.TextWidget.__init__( self, str( path ), **kw )
# we can be fairly sure that the average path requires a bit more space
# than the other things that go in TextWidgets.
self.setPreferredCharacterWidth( 60 )
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
self.selectingFinishedSignal().connect( Gaffer.WeakMethod( self.__selectingFinished ), scoped = False )
self.textChangedSignal().connect( Gaffer.WeakMethod( self.__textChanged ), scoped = False )
self.__popupMenu = None
self.__path = None
self.setPath( path )
def path( self ) :
warnings.warn( "PathWidget.path() is deprecated, use PathWidget.getPath() instead.", DeprecationWarning, 2 )
return self.__path
def setPath( self, path ) :
self.__path = path
self.__pathChangedConnection = self.__path.pathChangedSignal().connect( Gaffer.WeakMethod( self.__pathChanged, fallbackResult = None ) )
self.setText( str( self.__path ) )
def getPath( self ) :
return self.__path
def __keyPress( self, widget, event ) :
if not self.getEditable() :
return False
if event.key=="Tab" :
# do tab completion
position = self.getCursorPosition()
truncatedPath = self.__path.copy()
truncatedPath.setFromString( str( truncatedPath )[:position] )
if len( truncatedPath ) :
matchStart = truncatedPath[-1]
del truncatedPath[-1]
else :
matchStart = ""
matches = [ x[-1] for x in truncatedPath.children() if x[-1].startswith( matchStart ) ]
match = os.path.commonprefix( matches )
if match :
self.__path[:] = truncatedPath[:] + [ match ]
if len( matches )==1 and not self.__path.isLeaf() :
text = self.getText()
if not text.endswith( "/" ) :
self.setText( text + "/" )
self.setCursorPosition( len( self.getText() ) )
return True
elif event.key == "Down" :
if event.modifiers & GafferUI.ModifiableEvent.Modifiers.Shift :
# select all!
self.setSelection( None, None )
else :
text = self.getText()
position = self.getCursorPosition()
if position == len( text ) and len( text ) and text[-1]=="/" :
# select last character to trigger menu for next path entry
self.setSelection( -1, None )
else :
# select path entry around the cursor
low = text.rfind( "/", 0, position )
high = text.find( "/", position )
if low != -1 :
self.setSelection( low+1, high if high != -1 else None )
self.__popupMenuForSelection()
return True
return False
def __selectingFinished( self, widget ) :
assert( widget is self )
if self.getEditable() :
self.__popupMenuForSelection()
def __popupMenuForSelection( self ) :
start, end = self.getSelection()
if start == end :
return
text = self.getText()
selectedText = text[start:end]
if text == selectedText :
self.__popupHierarchy()
elif selectedText == "/" and end == len( text ) :
# the final slash was selected
self.__popupListing( end )
elif "/" not in selectedText and text[start-1] == "/" and ( end >= len( text ) or text[end] == "/" ) :
self.__popupListing( start )
def __popupHierarchy( self ) :
pathCopy = self.__path.copy()
md = IECore.MenuDefinition()
i = 0
while len( pathCopy ) :
md.append(
"/" + str( i ),
IECore.MenuItemDefinition(
label = str( pathCopy ),
command = functools.partial( Gaffer.WeakMethod( self.__path.setFromString ), str( pathCopy ) ),
)
)
del pathCopy[-1]
i += 1
self.__popupMenu = GafferUI.Menu( md )
self.__popupMenu.popup( parent = self, position = self.__popupPosition( 0 ), forcePosition=True, grabFocus=False )
def __popupListing( self, textIndex ) :
dirPath = self.__path.copy()
n = os.path.dirname( self.getText()[:textIndex] ) or "/"
dirPath.setFromString( n )
options = dirPath.children()
options = [ x[-1] for x in options ]
if len( options ) :
md = IECore.MenuDefinition()
for o in options :
md.append( "/" + o,
IECore.MenuItemDefinition(
label=o,
command = functools.partial( Gaffer.WeakMethod( self.__replacePathEntry ), len( dirPath ), o )
)
)
self.__popupMenu = GafferUI.Menu( md )
self.__popupMenu.popup( parent = self, position = self.__popupPosition( textIndex ), forcePosition=True, grabFocus=False )
def __replacePathEntry( self, position, newEntry ) :
if position==len( self.__path ) :
self.__path.append( newEntry )
else :
self.__path[position] = newEntry
self.__path.truncateUntilValid()
if position==len( self.__path )-1 and not self.__path.isLeaf() :
self.setText( self.getText() + "/" )
def __popupPosition( self, textIndex ) :
## \todo Surely there's a better way?
for x in range( 0, 10000 ) :
if self._qtWidget().cursorPositionAt( QtCore.QPoint( x, 5 ) ) >= textIndex :
break
bound = self.bound()
return imath.V2i( bound.min().x + x, bound.max().y )
def __pathChanged( self, path ) :
self.setText( str( path ) )
def __textChanged( self, widget ) :
text = self.getText()
with Gaffer.BlockedConnection( self.__pathChangedConnection ) :
try :
self.__path.setFromString( self.getText() )
except :
# no need to worry too much - it's up to the user to enter
# something valid. maybe they'll get it right next time.
pass
|
{
"content_hash": "7b3ad3954c22d48a339cce1de4c2124f",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 138,
"avg_line_length": 26.921951219512195,
"alnum_prop": 0.6472186990396811,
"repo_name": "lucienfostier/gaffer",
"id": "2b8dee03c0109b7e7f71d3a1de13c2214a3f97c2",
"size": "7384",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/GafferUI/PathWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7610953"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7892655"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
}
|
from leapp.tags import Tag
class SecondPhaseTag(Tag):
name = 'second_phase'
|
{
"content_hash": "8310b456d4e1468809ed090ff641e7dc",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 26,
"avg_line_length": 16.4,
"alnum_prop": 0.7195121951219512,
"repo_name": "vinzenz/prototype",
"id": "ead6c9516e28f2aa0927b22e3cdf0e33a581455d",
"size": "82",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/data/workflow-tests/tags/secondphase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1688"
},
{
"name": "HTML",
"bytes": "35793"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "PLpgSQL",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "290041"
},
{
"name": "Ruby",
"bytes": "1363"
},
{
"name": "Shell",
"bytes": "1416"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.timezone import now
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from filtered_contenttypes.fields import FilteredGenericForeignKey
from django_pgjson.fields import JsonBField
from .managers import ActionManager
from . import settings as app_settings
from . import registry
def _default_action_meta():
    return {}


class Action(models.Model):
    item_type = models.ForeignKey(ContentType, related_name='actions')
    item_id = models.PositiveIntegerField()
    item = FilteredGenericForeignKey('item_type', 'item_id')

    target_type = models.ForeignKey(ContentType, blank=True, null=True,
                                    related_name='target_actions')
    target_id = models.PositiveIntegerField(blank=True, null=True)
    target = FilteredGenericForeignKey('target_type', 'target_id')

    actor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='activity')
    verb = models.CharField(max_length=23,
                            choices=registry.as_model_choices())
    published = models.DateTimeField(auto_now_add=True)
    meta = JsonBField(default=_default_action_meta, blank=True)

    objects = ActionManager()

    class Meta:
        abstract = app_settings.get('ACTION_MODEL') != 'simple_activity.Action'
        ordering = ('-published',)

    @classmethod
    def add_action(klass, verb, actor, item, target=None, published=None,
                   meta={}):
        if not registry.is_valid(verb):
            raise ValueError('`{}` not a valid verb.'.format(verb))
        published = published or now()
        create_kwargs = {'actor': actor, 'item': item, 'verb': verb.code}
        if target:
            create_kwargs['target'] = target
        create_kwargs['published'] = published

        klass.objects.create(**create_kwargs)

    @property
    def verb_object(self):
        return registry.get_from_code(self.verb)
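
# Usage sketch (added for illustration; not part of the original module).
# It assumes a verb has already been registered with the app's registry, and
# `request.user` / `photo` are hypothetical model instances:
#
#   from simple_activity import registry
#   like = registry.get_from_code('like')  # 'like' is a hypothetical verb code
#   Action.add_action(like, actor=request.user, item=photo)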
|
{
"content_hash": "b8d6ad3c038f4cc8dcfaa6cb74c162d4",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 35.72727272727273,
"alnum_prop": 0.6727735368956743,
"repo_name": "owais/django-simple-activity",
"id": "69fa520dded44168c6ccb367b112f756aea4bd4a",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_activity/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9574"
}
],
"symlink_target": ""
}
|
import pytest
pytestmark = pytest.mark.nondestructive
def test_auth_with_company_id_and_psk(public_api_v1):
    result = public_api_v1.authentication()
    assert result and result.get('success')


def test_auth_with_auth_file(public_api_v1_auth_file):
    result = public_api_v1_auth_file.authentication()
    assert result and result.get('success')


def test_auth_with_auth_file_content(public_api_v1_auth_file_content):
    result = public_api_v1_auth_file_content.authentication()
    assert result and result.get('success')


def test_hosts(public_api_v1):
    public_api_v1.hosts()


def test_hardware_fields(public_api_v1):
    public_api_v1.hardware_fields()


def test_system_fields(public_api_v1):
    public_api_v1.system_fields()


@pytest.mark.xfail
def test_hardware_report(public_api_v1):
    public_api_v1.hardware_report()


@pytest.mark.xfail
def test_system_report(public_api_v1):
    public_api_v1.system_report()
|
{
"content_hash": "1a8fdfc56c40461594f060a71ef93916",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 70,
"avg_line_length": 23.45,
"alnum_prop": 0.7281449893390192,
"repo_name": "ninemoreminutes/lmiapi",
"id": "4c747049e97cc2eea0489dc413c0cecc273cf4cb",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_public_v1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "33393"
}
],
"symlink_target": ""
}
|
import cPickle as pickle
import json
import os
import unittest
from gzip import GzipFile
from shutil import rmtree
from time import time
from distutils.dir_util import mkpath
from eventlet import spawn, Timeout, listen
from swift.obj import updater as object_updater, server as object_server
from swift.obj.server import ASYNCDIR
from swift.common.ring import RingData
from swift.common import utils
from swift.common.utils import hash_path, normalize_timestamp, mkdirs, \
write_pickle
from test.unit import FakeLogger
class TestObjectUpdater(unittest.TestCase):

    def setUp(self):
        utils.HASH_PATH_SUFFIX = 'endcap'
        self.testdir = os.path.join(os.path.dirname(__file__),
                                    'object_updater')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        pickle.dump(
            RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                     [{'id': 0, 'ip': '127.0.0.1', 'port': 1, 'device': 'sda1',
                       'zone': 0},
                      {'id': 1, 'ip': '127.0.0.1', 'port': 1, 'device': 'sda1',
                       'zone': 2}], 30),
            GzipFile(os.path.join(self.testdir, 'container.ring.gz'), 'wb'))
        self.devices_dir = os.path.join(self.testdir, 'devices')
        os.mkdir(self.devices_dir)
        self.sda1 = os.path.join(self.devices_dir, 'sda1')
        os.mkdir(self.sda1)
        os.mkdir(os.path.join(self.sda1, 'tmp'))

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_creation(self):
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '2',
            'node_timeout': '5'})
        self.assert_(hasattr(cu, 'logger'))
        self.assert_(cu.logger is not None)
        self.assertEquals(cu.devices, self.devices_dir)
        self.assertEquals(cu.interval, 1)
        self.assertEquals(cu.concurrency, 2)
        self.assertEquals(cu.node_timeout, 5)
        self.assert_(cu.get_container_ring() is not None)

    def test_object_sweep(self):
        prefix_dir = os.path.join(self.sda1, ASYNCDIR, 'abc')
        mkpath(prefix_dir)

        objects = {
            'a': [1089.3, 18.37, 12.83, 1.3],
            'b': [49.4, 49.3, 49.2, 49.1],
            'c': [109984.123],
        }

        expected = set()
        for o, timestamps in objects.iteritems():
            ohash = hash_path('account', 'container', o)
            for t in timestamps:
                o_path = os.path.join(prefix_dir, ohash + '-' +
                                      normalize_timestamp(t))
                if t == timestamps[0]:
                    expected.add(o_path)
                write_pickle({}, o_path)

        seen = set()

        class MockObjectUpdater(object_updater.ObjectUpdater):
            def process_object_update(self, update_path, device):
                seen.add(update_path)
                os.unlink(update_path)

        cu = MockObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '5'})
        cu.object_sweep(self.sda1)
        self.assert_(not os.path.exists(prefix_dir))
        self.assertEqual(expected, seen)

    def test_run_once(self):
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'})
        cu.run_once()
        async_dir = os.path.join(self.sda1, object_server.ASYNCDIR)
        os.mkdir(async_dir)
        cu.run_once()
        self.assert_(os.path.exists(async_dir))

        odd_dir = os.path.join(async_dir, 'not really supposed to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assert_(os.path.exists(async_dir))
        self.assert_(not os.path.exists(odd_dir))

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump({'op': 'PUT', 'account': 'a', 'container': 'c',
                             'obj': 'o', 'headers': {
                                 'X-Container-Timestamp': normalize_timestamp(0)}},
                            async_pending)
        cu.logger = FakeLogger()
        cu.run_once()
        self.assert_(not os.path.exists(older_op_path))
        self.assert_(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'failures': 1, 'unlinks': 1})

        bindsock = listen(('127.0.0.1', 0))

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assert_('x-container-timestamp' in headers)
            except BaseException, err:
                return err
            return None

        def accept(return_codes):
            codes = iter(return_codes)
            try:
                events = []
                for x in xrange(len(return_codes)):
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(
                            spawn(accepter, sock, codes.next()))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException, err:
                return err
            return None

        event = spawn(accept, [201, 500])
        for dev in cu.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.logger = FakeLogger()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assert_(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'failures': 1})

        event = spawn(accept, [201])
        cu.logger = FakeLogger()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assert_(not os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'unlinks': 1, 'successes': 1})


if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "44ba0cb175d2473776bbf46417eb3bf7",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 79,
"avg_line_length": 36.07729468599034,
"alnum_prop": 0.5002678093197643,
"repo_name": "Mirantis/swift-encrypt",
"id": "104325b6acb3517df26a13b3b4dc78eb913159e3",
"size": "8058",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/unit/obj/test_updater.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2710047"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class X12DelimiterOverrides(Model):
    """X12DelimiterOverrides.

    :param protocol_version: The protocol version.
    :type protocol_version: str
    :param message_id: The message id.
    :type message_id: str
    :param data_element_separator: The data element separator.
    :type data_element_separator: int
    :param component_separator: The component separator.
    :type component_separator: int
    :param segment_terminator: The segment terminator.
    :type segment_terminator: int
    :param segment_terminator_suffix: The segment terminator suffix. Possible
     values include: 'NotSpecified', 'None', 'CR', 'LF', 'CRLF'
    :type segment_terminator_suffix: str or :class:`SegmentTerminatorSuffix
     <azure.mgmt.logic.models.SegmentTerminatorSuffix>`
    :param replace_character: The replacement character.
    :type replace_character: int
    :param replace_separators_in_payload: The value indicating whether to
     replace separators in payload.
    :type replace_separators_in_payload: bool
    :param target_namespace: The target namespace on which this delimiter
     settings has to be applied.
    :type target_namespace: str
    """

    _attribute_map = {
        'protocol_version': {'key': 'protocolVersion', 'type': 'str'},
        'message_id': {'key': 'messageId', 'type': 'str'},
        'data_element_separator': {'key': 'dataElementSeparator', 'type': 'int'},
        'component_separator': {'key': 'componentSeparator', 'type': 'int'},
        'segment_terminator': {'key': 'segmentTerminator', 'type': 'int'},
        'segment_terminator_suffix': {'key': 'segmentTerminatorSuffix', 'type': 'SegmentTerminatorSuffix'},
        'replace_character': {'key': 'replaceCharacter', 'type': 'int'},
        'replace_separators_in_payload': {'key': 'replaceSeparatorsInPayload', 'type': 'bool'},
        'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
    }

    def __init__(self, protocol_version=None, message_id=None, data_element_separator=None, component_separator=None, segment_terminator=None, segment_terminator_suffix=None, replace_character=None, replace_separators_in_payload=None, target_namespace=None):
        self.protocol_version = protocol_version
        self.message_id = message_id
        self.data_element_separator = data_element_separator
        self.component_separator = component_separator
        self.segment_terminator = segment_terminator
        self.segment_terminator_suffix = segment_terminator_suffix
        self.replace_character = replace_character
        self.replace_separators_in_payload = replace_separators_in_payload
        self.target_namespace = target_namespace
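
# Illustrative usage sketch (added; not part of the original SDK module).
# The field values below are hypothetical ASCII codes for common X12 delimiters,
# matching the parameter types documented in the class docstring above.
if __name__ == '__main__':
    example = X12DelimiterOverrides(
        protocol_version='00401',       # hypothetical X12 version
        message_id='850',               # hypothetical transaction set id
        data_element_separator=42,      # '*'
        component_separator=58,         # ':'
        segment_terminator=126,         # '~'
        segment_terminator_suffix='CR',
    )
    print(example.data_element_separator)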
|
{
"content_hash": "aae980947c5dd74b62b6a1c3f6f0cf31",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 258,
"avg_line_length": 52.01923076923077,
"alnum_prop": 0.6979667282809612,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "b3fcdfc6c1e5b37b1b4fecee07fa146232e5b13e",
"size": "3179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-logic/azure/mgmt/logic/models/x12_delimiter_overrides.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
import sys
from PIL import Image, ImageFilter
from mpi4py import MPI
import colorsys
from math import ceil
import pickle
import time
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
name = MPI.Get_processor_name()
if rank == 0:
    start_time = time.time()

    #Open original image and calculate the division of it
    #original = Image.open("/home/pi/images/syntspec.jpg")
    original = Image.open("/home/pi/images/" + sys.argv[1])
    width, height = original.size
    oneThird = width / 3
    first = 0 + oneThird
    second = first + oneThird

    #Remove the first third and send it to Process Rank 1
    firstThird = original.crop((0, 0, first, height))
    firstThirdImageData = {
        'pixels':firstThird.tobytes(),
        'size':firstThird.size,
        'mode':firstThird.mode,
    }
    firstThirdToSend = {'image':firstThirdImageData}
    comm.send(firstThirdToSend, dest = 1)

    #Remove the second third and send it to Process Rank 2
    secondThird = original.crop((first, 0, second, height))
    secondThirdImageData = {
        'pixels':secondThird.tobytes(),
        'size':secondThird.size,
        'mode':secondThird.mode,
    }
    secondThirdToSend = {'image':secondThirdImageData}
    comm.send(secondThirdToSend, dest = 2)

    #Remove the third third and send it to Process Rank 3
    thirdThird = original.crop((second, 0, width, height))
    thirdThirdImageData = {
        'pixels':thirdThird.tobytes(),
        'size':thirdThird.size,
        'mode':thirdThird.mode,
    }
    thirdThirdToSend = {'image':thirdThirdImageData}
    comm.send(thirdThirdToSend, dest = 3)

    #Receive filtered image portions back from leaf nodes
    firstFilterData = comm.recv(source = 1)
    firstFilter = firstFilterData.get('image')
    firstFilterPixels = firstFilter.get('pixels')
    firstFilterSize = firstFilter.get('size')
    firstFilterMode = firstFilter.get('mode')
    firstThirdFilter = Image.frombytes(firstFilterMode, firstFilterSize, firstFilterPixels)

    secondFilterData = comm.recv(source = 2)
    secondFilter = secondFilterData.get('image')
    secondFilterPixels = secondFilter.get('pixels')
    secondFilterSize = secondFilter.get('size')
    secondFilterMode = secondFilter.get('mode')
    secondThirdFilter = Image.frombytes(secondFilterMode, secondFilterSize, secondFilterPixels)

    thirdFilterData = comm.recv(source = 3)
    thirdFilter = thirdFilterData.get('image')
    thirdFilterPixels = thirdFilter.get('pixels')
    thirdFilterSize = thirdFilter.get('size')
    thirdFilterMode = thirdFilter.get('mode')
    thirdThirdFilter = Image.frombytes(thirdFilterMode, thirdFilterSize, thirdFilterPixels)

    #Reassemble and save altered image
    finalImage = Image.new("RGB", (width, height))
    finalImage.paste(firstThirdFilter, (0, 0))
    finalImage.paste(secondThirdFilter, (first, 0))
    finalImage.paste(thirdThirdFilter, (second, 0))
    finalImage.save("/home/pi/images/FinalSyntSpec.jpg")
    finalImage.save("/home/pi/divide_manipulate/results/" + sys.argv[1])

    print time.time() - start_time, "seconds"

if rank == 1:
    #Receive image portion from head node
    data = comm.recv(source = 0)
    receivedImageData = data.get('image')
    receivedImagePixels = receivedImageData.get('pixels')
    receivedImageSize = receivedImageData.get('size')
    receivedImageMode = receivedImageData.get('mode')
    receivedImage = Image.frombytes(receivedImageMode, receivedImageSize, receivedImagePixels)
    firstThirdFilter = receivedImage.filter(ImageFilter.BLUR)

    #Send filtered image back to head node
    firstThirdFilterImageData = {
        'pixels':firstThirdFilter.tobytes(),
        'size':firstThirdFilter.size,
        'mode':firstThirdFilter.mode,
    }
    firstThirdFilterToSend = {'image':firstThirdFilterImageData}
    comm.send(firstThirdFilterToSend, dest = 0)

if rank == 2:
    #Receive image portion from head node
    data = comm.recv(source = 0)
    receivedImageData = data.get('image')
    receivedImagePixels = receivedImageData.get('pixels')
    receivedImageSize = receivedImageData.get('size')
    receivedImageMode = receivedImageData.get('mode')
    receivedImage = Image.frombytes(receivedImageMode, receivedImageSize, receivedImagePixels)
    secondThirdFilter = receivedImage.filter(ImageFilter.EMBOSS)

    #Send filtered image back to head node
    secondThirdFilterImageData = {
        'pixels':secondThirdFilter.tobytes(),
        'size':secondThirdFilter.size,
        'mode':secondThirdFilter.mode,
    }
    secondThirdFilterToSend = {'image':secondThirdFilterImageData}
    comm.send(secondThirdFilterToSend, dest = 0)

if rank == 3:
    #Receive image portion from head node
    data = comm.recv(source = 0)
    receivedImageData = data.get('image')
    receivedImagePixels = receivedImageData.get('pixels')
    receivedImageSize = receivedImageData.get('size')
    receivedImageMode = receivedImageData.get('mode')
    receivedImage = Image.frombytes(receivedImageMode, receivedImageSize, receivedImagePixels)
    thirdThirdFilter = receivedImage.filter(ImageFilter.EDGE_ENHANCE)

    #Send filtered data back to head node
    thirdThirdFilterImageData = {
        'pixels':thirdThirdFilter.tobytes(),
        'size':thirdThirdFilter.size,
        'mode':thirdThirdFilter.mode,
    }
    thirdThirdFilterToSend = {'image':thirdThirdFilterImageData}
    comm.send(thirdThirdFilterToSend, dest = 0)
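
# Launch note (added; inferred from the rank checks above, not part of the
# original file): the script expects at least four MPI ranks -- rank 0 plus
# workers 1-3 -- so it would typically be started with something like
#   mpiexec -n 4 python DivideAndRecreateCluster.py input.jpg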
|
{
"content_hash": "30dab8b214d5b59071d4493d7d33f1b2",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 93,
"avg_line_length": 36.638297872340424,
"alnum_prop": 0.7468060394889663,
"repo_name": "BrennaBlackwell/PythonForPi",
"id": "ef700e6f7677819a9858cdefb088a125d6a96b90",
"size": "5363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project code/divide image mpi/DivideAndRecreateCluster.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "312"
},
{
"name": "Python",
"bytes": "19711"
},
{
"name": "Shell",
"bytes": "21207"
}
],
"symlink_target": ""
}
|