text stringlengths 4 1.02M | meta dict |
|---|---|
"""
celery.loaders.base
~~~~~~~~~~~~~~~~~~~
Loader base class.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import importlib
import os
import re
import warnings
from anyjson import deserialize
from ..datastructures import DictAttribute
from ..exceptions import ImproperlyConfigured
from ..utils import (cached_property, get_cls_by_name,
import_from_cwd as _import_from_cwd)
from ..utils.functional import maybe_list
#: Task modules that are always imported, in addition to ``CELERY_IMPORTS``.
BUILTIN_MODULES = frozenset(["celery.task"])

#: Error message raised when the configuration environment variable is
#: missing; takes the variable name as its single ``%r`` argument.
ERROR_ENVVAR_NOT_SET = (
"""The environment variable %r is not set,
and as such the configuration could not be loaded.
Please set this variable and make it point to
a configuration module.""")
class BaseLoader(object):
"""The base class for loaders.
Loaders handles,
* Reading celery client/worker configurations.
* What happens when a task starts?
See :meth:`on_task_init`.
* What happens when the worker starts?
See :meth:`on_worker_init`.
* What modules are imported to find tasks?
"""
builtin_modules = BUILTIN_MODULES
configured = False
error_envvar_not_set = ERROR_ENVVAR_NOT_SET
override_backends = {}
worker_initialized = False
_conf = None
def __init__(self, app=None, **kwargs):
from ..app import app_or_default
self.app = app_or_default(app)
def on_task_init(self, task_id, task):
"""This method is called before a task is executed."""
pass
def on_process_cleanup(self):
"""This method is called after a task is executed."""
pass
def on_worker_init(self):
"""This method is called when the worker (:program:`celeryd`)
starts."""
pass
def on_worker_process_init(self):
"""This method is called when a child process starts."""
pass
def import_task_module(self, module):
return self.import_from_cwd(module)
def import_module(self, module, package=None):
return importlib.import_module(module, package=package)
def import_from_cwd(self, module, imp=None, package=None):
return _import_from_cwd(module,
self.import_module if imp is None else imp,
package=package)
def import_default_modules(self):
imports = set(maybe_list(self.conf.get("CELERY_IMPORTS") or ()))
return [self.import_task_module(module)
for module in imports | self.builtin_modules]
def init_worker(self):
if not self.worker_initialized:
self.worker_initialized = True
self.on_worker_init()
def init_worker_process(self):
self.on_worker_process_init()
def config_from_envvar(self, variable_name, silent=False):
module_name = os.environ.get(variable_name)
if not module_name:
if silent:
return False
raise ImproperlyConfigured(self.error_envvar_not_set % module_name)
return self.config_from_object(module_name, silent=silent)
def config_from_object(self, obj, silent=False):
if isinstance(obj, basestring):
try:
if "." in obj:
obj = get_cls_by_name(obj, imp=self.import_from_cwd)
else:
obj = self.import_from_cwd(obj)
except (ImportError, AttributeError):
if silent:
return False
raise
if not hasattr(obj, "__getitem__"):
obj = DictAttribute(obj)
self._conf = obj
return True
def cmdline_config_parser(self, args, namespace="celery",
re_type=re.compile(r"\((\w+)\)"),
extra_types={"json": deserialize},
override_types={"tuple": "json",
"list": "json",
"dict": "json"}):
from ..app.defaults import Option, NAMESPACES
namespace = namespace.upper()
typemap = dict(Option.typemap, **extra_types)
def getarg(arg):
"""Parse a single configuration definition from
the command line."""
## find key/value
# ns.key=value|ns_key=value (case insensitive)
key, value = arg.split('=', 1)
key = key.upper().replace(".", "_")
## find namespace.
# .key=value|_key=value expands to default namespace.
if key[0] == '_':
ns, key = namespace, key[1:]
else:
# find namespace part of key
ns, key = key.split('_', 1)
ns_key = (ns and ns + "_" or "") + key
# (type)value makes cast to custom type.
cast = re_type.match(value)
if cast:
type_ = cast.groups()[0]
type_ = override_types.get(type_, type_)
value = value[len(cast.group()):]
value = typemap[type_](value)
else:
try:
value = NAMESPACES[ns][key].to_python(value)
except ValueError, exc:
# display key name in error message.
raise ValueError("%r: %s" % (ns_key, exc))
return ns_key, value
return dict(map(getarg, args))
def mail_admins(self, subject, body, fail_silently=False,
sender=None, to=None, host=None, port=None,
user=None, password=None, timeout=None,
use_ssl=False, use_tls=False):
try:
message = self.mail.Message(sender=sender, to=to,
subject=subject, body=body)
mailer = self.mail.Mailer(host=host, port=port,
user=user, password=password,
timeout=timeout, use_ssl=use_ssl,
use_tls=use_tls)
mailer.send(message)
except Exception, exc:
if not fail_silently:
raise
warnings.warn(self.mail.SendmailWarning(
"Mail could not be sent: %r %r" % (
exc, {"To": to, "Subject": subject})))
@property
def conf(self):
"""Loader configuration."""
if self._conf is None:
self._conf = self.read_configuration()
return self._conf
@cached_property
def mail(self):
return self.import_module("celery.utils.mail")
| {
"content_hash": "7699cd2e8ad982e6885869a148ee84db",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 79,
"avg_line_length": 32.633663366336634,
"alnum_prop": 0.5505157766990292,
"repo_name": "KarimAllah/celery",
"id": "61a0e5dd8096a01d79e23323717099c3c753a8e7",
"size": "6616",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "celery/loaders/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1077395"
},
{
"name": "Shell",
"bytes": "40914"
}
],
"symlink_target": ""
} |
import io
import json
import os
import unittest
from . import specimen
from .fhirdate import FHIRDate
class SpecimenTests(unittest.TestCase):
    """Round-trip tests for the FHIR ``Specimen`` resource.

    Each test loads a canned example JSON document, checks the parsed
    model, serializes it back with ``as_json()``, re-parses the result
    and checks it again to prove serialization is lossless.
    """

    def instantiate_from(self, filename):
        """Load *filename* from the test data directory (given by the
        ``FHIR_UNITTEST_DATADIR`` environment variable, default cwd) and
        parse it into a ``Specimen`` instance."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Specimen", js["resourceType"])
        return specimen.Specimen(js)

    def testSpecimen1(self):
        """Parse, check, serialize, re-parse, check the isolate example."""
        inst = self.instantiate_from("specimen-example-isolate.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen1(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen1(inst2)

    def implSpecimen1(self, inst):
        """Field-by-field expectations for specimen-example-isolate.json."""
        self.assertEqual(inst.accessionIdentifier.system, "http://lab.acme.org/specimens/2011")
        self.assertEqual(inst.accessionIdentifier.value, "X352356-ISO1")
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T07:03:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T07:03:00Z")
        self.assertEqual(inst.collection.method.coding[0].code, "BAP")
        self.assertEqual(inst.collection.method.coding[0].system, "http://hl7.org/fhir/v2/0488")
        self.assertEqual(inst.contained[0].id, "stool")
        self.assertEqual(inst.id, "isolate")
        self.assertEqual(inst.note[0].text, "Patient dropped off specimen")
        self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(inst.status, "available")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "429951000124103")
        self.assertEqual(inst.type.coding[0].display, "Bacterial isolate specimen")
        self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")

    def testSpecimen2(self):
        """Parse, check, serialize, re-parse, check the serum example."""
        inst = self.instantiate_from("specimen-example-serum.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen2(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen2(inst2)

    def implSpecimen2(self, inst):
        """Field-by-field expectations for specimen-example-serum.json."""
        self.assertEqual(inst.accessionIdentifier.system, "http://acme.com/labs/accession-ids")
        self.assertEqual(inst.accessionIdentifier.value, "20150816-00124")
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-16T06:40:17Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-16T06:40:17Z")
        self.assertEqual(inst.container[0].type.coding[0].code, "SST")
        self.assertEqual(inst.container[0].type.coding[0].display, "Serum Separator Tube")
        self.assertEqual(inst.container[0].type.coding[0].system, "http://acme.com/labs")
        self.assertEqual(inst.id, "sst")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "119364003")
        self.assertEqual(inst.type.coding[0].display, "Serum sample")
        self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")

    def testSpecimen3(self):
        """Parse, check, serialize, re-parse, check the urine example."""
        inst = self.instantiate_from("specimen-example-urine.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen3(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen3(inst2)

    def implSpecimen3(self, inst):
        """Field-by-field expectations for specimen-example-urine.json."""
        self.assertEqual(inst.accessionIdentifier.system, "http://lab.acme.org/specimens/2015")
        self.assertEqual(inst.accessionIdentifier.value, "X352356")
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(inst.container[0].capacity.unit, "mls")
        self.assertEqual(inst.container[0].capacity.value, 50)
        self.assertEqual(inst.container[0].specimenQuantity.unit, "mls")
        self.assertEqual(inst.container[0].specimenQuantity.value, 10)
        self.assertEqual(inst.container[0].type.text, "Non-sterile specimen container")
        self.assertEqual(inst.id, "vma-urine")
        self.assertEqual(inst.processing[0].description, "Acidify to pH < 3.0 with 6 N HCl.")
        self.assertEqual(inst.processing[0].procedure.coding[0].code, "ACID")
        self.assertEqual(inst.processing[0].procedure.coding[0].system, "http://hl7.org/fhir/v2/0373")
        self.assertEqual(inst.processing[0].timeDateTime.date, FHIRDate("2015-08-18T08:10:00Z").date)
        self.assertEqual(inst.processing[0].timeDateTime.as_json(), "2015-08-18T08:10:00Z")
        self.assertEqual(inst.receivedTime.date, FHIRDate("2015-08-18T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2015-08-18T07:03:00Z")
        self.assertEqual(inst.status, "available")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "RANDU")
        self.assertEqual(inst.type.coding[0].display, "Urine, Random")
        self.assertEqual(inst.type.coding[0].system, "http://hl7.org/fhir/v2/0487")

    def testSpecimen4(self):
        """Parse, check, serialize, re-parse, check the default example."""
        inst = self.instantiate_from("specimen-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Specimen instance")
        self.implSpecimen4(inst)
        js = inst.as_json()
        self.assertEqual("Specimen", js["resourceType"])
        inst2 = specimen.Specimen(js)
        self.implSpecimen4(inst2)

    def implSpecimen4(self, inst):
        """Field-by-field expectations for specimen-example.json."""
        self.assertEqual(inst.accessionIdentifier.system, "http://lab.acme.org/specimens/2011")
        self.assertEqual(inst.accessionIdentifier.value, "X352356")
        self.assertEqual(inst.collection.bodySite.coding[0].code, "49852007")
        self.assertEqual(inst.collection.bodySite.coding[0].display, "Structure of median cubital vein (body structure)")
        self.assertEqual(inst.collection.bodySite.coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.collection.bodySite.text, "Right median cubital vein")
        self.assertEqual(inst.collection.collectedDateTime.date, FHIRDate("2011-05-30T06:15:00Z").date)
        self.assertEqual(inst.collection.collectedDateTime.as_json(), "2011-05-30T06:15:00Z")
        self.assertEqual(inst.collection.method.coding[0].code, "LNV")
        self.assertEqual(inst.collection.method.coding[0].system, "http://hl7.org/fhir/v2/0488")
        self.assertEqual(inst.collection.quantity.unit, "mL")
        self.assertEqual(inst.collection.quantity.value, 6)
        self.assertEqual(inst.contained[0].id, "hep")
        self.assertEqual(inst.container[0].capacity.unit, "mL")
        self.assertEqual(inst.container[0].capacity.value, 10)
        self.assertEqual(inst.container[0].description, "Green Gel tube")
        self.assertEqual(inst.container[0].identifier[0].value, "48736-15394-75465")
        self.assertEqual(inst.container[0].specimenQuantity.unit, "mL")
        self.assertEqual(inst.container[0].specimenQuantity.value, 6)
        self.assertEqual(inst.container[0].type.text, "Vacutainer")
        self.assertEqual(inst.id, "101")
        self.assertEqual(inst.identifier[0].system, "http://ehr.acme.org/identifiers/collections")
        self.assertEqual(inst.identifier[0].value, "23234352356")
        self.assertEqual(inst.note[0].text, "Specimen is grossly lipemic")
        self.assertEqual(inst.receivedTime.date, FHIRDate("2011-03-04T07:03:00Z").date)
        self.assertEqual(inst.receivedTime.as_json(), "2011-03-04T07:03:00Z")
        self.assertEqual(inst.status, "available")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "122555007")
        self.assertEqual(inst.type.coding[0].display, "Venous blood specimen")
        self.assertEqual(inst.type.coding[0].system, "http://snomed.info/sct")
| {
"content_hash": "e67bc24273f53c68d41337e123ab0317",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 121,
"avg_line_length": 57.02054794520548,
"alnum_prop": 0.6923723723723724,
"repo_name": "all-of-us/raw-data-repository",
"id": "e9bafea0156d0d5b7dc0d47447861547e71842b6",
"size": "8451",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/lib_fhir/fhirclient_3_0_0/models/specimen_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
import tweepy
from random import randint
from time import sleep
from API_KEYS import TWITTER_CONSUMER
from API_KEYS import TWITTER_CONSUMER_SECRET
from API_KEYS import TWITTER_ACCESS
from API_KEYS import TWITTER_ACCESS_SECRET
# keys
consumer_key = TWITTER_CONSUMER
consumer_secret = TWITTER_CONSUMER_SECRET
access_token = TWITTER_ACCESS
access_token_secret = TWITTER_ACCESS_SECRET

# authentication with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Read the Stallman book line by line; the context manager guarantees
# the file handle is closed even if reading fails (the original
# open/close pair leaked the handle on error).
with open('free_as_in_freedom.txt', 'r') as stallman_file:
    file_lines = stallman_file.readlines()

# iterate through document, tweeting every non-blank line
for line in file_lines:
    try:
        print(line)
        if line != '\n':
            api.update_status(line)
    except tweepy.TweepError as e:
        # typically a duplicate-status rejection; log it and carry on
        print(e.reason)
    # wait 10-60 minutes between tweets to stay under rate limits
    sleep(randint(600, 3600))
| {
"content_hash": "05d695421b115cd3d4a309e1fa33b64f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 57,
"avg_line_length": 27.057142857142857,
"alnum_prop": 0.7275607180570222,
"repo_name": "inflatus/Python",
"id": "2a88c1b16e8a9df1962fce18d329e8aac31a7132",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scraping/stallman_tweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22824"
},
{
"name": "Shell",
"bytes": "742"
}
],
"symlink_target": ""
} |
"""
fs.expose.dokan.libdokan: low-level ctypes interface to Dokan
"""
from ctypes import *
# Bind the two entry points we need up front; an AttributeError means the
# Dokan1 DLL is not installed (or does not export these functions).
try:
    DokanMain = windll.Dokan1.DokanMain
    DokanVersion = windll.Dokan1.DokanVersion
except AttributeError:
    raise ImportError("Dokan DLL not found")

from ctypes.wintypes import *

# Aliases for Win32/NT scalar types not provided by ctypes.wintypes.
ULONG64 = c_ulonglong
PULONGLONG = POINTER(c_ulonglong)
PVOID = c_void_p
PULONG = POINTER(c_ulong)
UCHAR = c_ubyte
LPDWORD = POINTER(c_ulong)
LONGLONG = c_longlong
NTSTATUS = c_long
USHORT = c_ushort
WCHAR = c_wchar

DokanVersion.restype = ULONG
DokanVersion.argtypes = ()

DOKAN_MINIMUM_COMPATIBLE_VERSION = 100  # this is release 1.0.0
# Refuse to load against a driver older than 1.0.0; the callback
# signatures below follow the 1.0 ABI.
if DokanVersion() < DOKAN_MINIMUM_COMPATIBLE_VERSION:
    raise ImportError("Dokan DLL is too old")
# Win32 maximum path length in characters.
MAX_PATH = 260

# Opaque security-descriptor type; only ever handled through pointers.
class SECURITY_DESCRIPTOR(Structure): pass
PSECURITY_DESCRIPTOR = POINTER(SECURITY_DESCRIPTOR)
PPSECURITY_DESCRIPTOR = POINTER(PSECURITY_DESCRIPTOR)

SECURITY_INFORMATION = DWORD
PSECURITY_INFORMATION = POINTER(SECURITY_INFORMATION)
class FILETIME(Structure):
    """Win32 FILETIME: 64-bit timestamp split into two DWORDs."""
    _fields_ = [
        ("dwLowDateTime", DWORD),
        ("dwHighDateTime", DWORD),
    ]
class WIN32_FIND_DATAW(Structure):
    """Wide-character directory entry, as filled in by FindFiles
    callbacks. Field order must match the Win32 struct exactly."""
    _fields_ = [
        ("dwFileAttributes", DWORD),
        ("ftCreationTime", FILETIME),
        ("ftLastAccessTime", FILETIME),
        ("ftLastWriteTime", FILETIME),
        ("nFileSizeHigh", DWORD),
        ("nFileSizeLow", DWORD),
        ("dwReserved0", DWORD),
        ("dwReserved1", DWORD),
        ("cFileName", WCHAR * MAX_PATH),
        ("cAlternateFileName", WCHAR * 14),
    ]
class BY_HANDLE_FILE_INFORMATION(Structure):
    """Mirror of the Win32 BY_HANDLE_FILE_INFORMATION struct, used by
    the GetFileInformation callback."""
    _fields_ = [
        ('dwFileAttributes', DWORD),
        ('ftCreationTime', FILETIME),
        ('ftLastAccessTime', FILETIME),
        ('ftLastWriteTime', FILETIME),
        ('dwVolumeSerialNumber', DWORD),
        ('nFileSizeHigh', DWORD),
        ('nFileSizeLow', DWORD),
        ('nNumberOfLinks', DWORD),
        ('nFileIndexHigh', DWORD),
        ('nFileIndexLow', DWORD),
    ]
class DOKAN_OPTIONS(Structure):
    """Mount options passed to DokanMain; layout must match the
    DOKAN_OPTIONS struct of the Dokan 1.0 ABI."""
    _fields_ = [
        ("Version", USHORT),
        ("ThreadCount", USHORT),
        ("Options", ULONG),
        ("GlobalContext", ULONG64),
        ("MountPoint", LPCWSTR),
        ("UNCName", LPCWSTR),
        ("Timeout", ULONG),
        ("AllocationUnitSize", ULONG),
        ("SectorSize", ULONG),
    ]
class DOKAN_FILE_INFO(Structure):
    """Per-request file information handed to every filesystem callback.

    Field names (including the 'SyncronousIo' spelling) mirror the
    upstream Dokan header -- presumably intentional; verify against
    dokan.h before renaming anything.
    """
    _fields_ = [
        ("Context", ULONG64),
        ("DokanContext", ULONG64),
        ("DokanOptions", POINTER(DOKAN_OPTIONS)),
        ("ProcessId", ULONG),
        ("IsDirectory", UCHAR),
        ("DeleteOnClose", UCHAR),
        ("PagingIO", UCHAR),
        ("SyncronousIo", UCHAR),
        ("Nocache", UCHAR),
        ("WriteToEndOfFile", UCHAR),
    ]
PDOKAN_FILE_INFO = POINTER(DOKAN_FILE_INFO)
# Callback the FindFiles handlers invoke once per directory entry.
PFillFindData = WINFUNCTYPE(c_int, POINTER(WIN32_FIND_DATAW), PDOKAN_FILE_INFO)
class DOKAN_OPERATIONS(Structure):
    """Table of filesystem callbacks passed to DokanMain.

    Field order and each WINFUNCTYPE prototype must match the
    DOKAN_OPERATIONS struct of the Dokan 1.0 header exactly; the driver
    calls these slots by position.
    """
    _fields_ = [
        ("ZwCreateFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PVOID,    # SecurityContext, see
                      # https://msdn.microsoft.com/en-us/library/windows/hardware/ff550613(v=vs.85).aspx
            DWORD,    # DesiredAccess
            ULONG,    # FileAttributes
            ULONG,    # ShareAccess
            ULONG,    # CreateDisposition
            ULONG,    # CreateOptions
            PDOKAN_FILE_INFO)),
        ("Cleanup", WINFUNCTYPE(None,
            LPCWSTR,  # FileName
            PDOKAN_FILE_INFO)),
        ("CloseFile", WINFUNCTYPE(None,
            LPCWSTR,  # FileName
            PDOKAN_FILE_INFO)),
        ("ReadFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LPVOID,    # Buffer
            DWORD,     # NumberOfBytesToRead
            LPDWORD,   # NumberOfBytesRead
            LONGLONG,  # Offset
            PDOKAN_FILE_INFO)),
        ("WriteFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LPCVOID,   # Buffer
            DWORD,     # NumberOfBytesToWrite
            LPDWORD,   # NumberOfBytesWritten
            LONGLONG,  # Offset
            PDOKAN_FILE_INFO)),
        ("FlushFileBuffers", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PDOKAN_FILE_INFO)),
        ("GetFileInformation", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            POINTER(BY_HANDLE_FILE_INFORMATION),  # Buffer
            PDOKAN_FILE_INFO)),
        ("FindFiles", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,        # PathName
            PFillFindData,  # call this function with PWIN32_FIND_DATAW
            PDOKAN_FILE_INFO)),
        ("FindFilesWithPattern", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,        # PathName
            LPCWSTR,        # SearchPattern
            PFillFindData,  # call this function with PWIN32_FIND_DATAW
            PDOKAN_FILE_INFO)),
        ("SetFileAttributes", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            DWORD,    # FileAttributes
            PDOKAN_FILE_INFO)),
        ("SetFileTime", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,            # FileName
            POINTER(FILETIME),  # CreationTime
            POINTER(FILETIME),  # LastAccessTime
            POINTER(FILETIME),  # LastWriteTime
            PDOKAN_FILE_INFO)),
        ("DeleteFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PDOKAN_FILE_INFO)),
        ("DeleteDirectory", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PDOKAN_FILE_INFO)),
        ("MoveFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # ExistingFileName
            LPCWSTR,  # NewFileName
            BOOL,     # ReplaceExisiting
            PDOKAN_FILE_INFO)),
        ("SetEndOfFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LONGLONG,  # Length
            PDOKAN_FILE_INFO)),
        ("SetAllocationSize", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LONGLONG,  # Length
            PDOKAN_FILE_INFO)),
        ("LockFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LONGLONG,  # ByteOffset
            LONGLONG,  # Length
            PDOKAN_FILE_INFO)),
        ("UnlockFile", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,   # FileName
            LONGLONG,  # ByteOffset
            LONGLONG,  # Length
            PDOKAN_FILE_INFO)),
        ("GetDiskFreeSpace", WINFUNCTYPE(NTSTATUS,
            PULONGLONG,  # FreeBytesAvailable
            PULONGLONG,  # TotalNumberOfBytes
            PULONGLONG,  # TotalNumberOfFreeBytes
            PDOKAN_FILE_INFO)),
        ("GetVolumeInformation", WINFUNCTYPE(NTSTATUS,
            PVOID,    # VolumeNameBuffer
            DWORD,    # VolumeNameSize in num of chars
            LPDWORD,  # VolumeSerialNumber
            LPDWORD,  # MaximumComponentLength in num of chars
            LPDWORD,  # FileSystemFlags
            PVOID,    # FileSystemNameBuffer
            DWORD,    # FileSystemNameSize in num of chars
            PDOKAN_FILE_INFO)),
        ("Mounted", WINFUNCTYPE(NTSTATUS,
            PDOKAN_FILE_INFO)),
        ("Unmounted", WINFUNCTYPE(NTSTATUS,
            # NOTE(review): every other callback takes PDOKAN_FILE_INFO;
            # passing the struct by value here looks suspicious -- confirm
            # against the dokan.h prototype before relying on it.
            DOKAN_FILE_INFO)),
        ("GetFileSecurity", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PULONG,   # A pointer to SECURITY_INFORMATION value being requested
            PVOID,    # A pointer to SECURITY_DESCRIPTOR buffer to be filled
            ULONG,    # Length of Security descriptor buffer
            PULONG,   # Length Needed
            PDOKAN_FILE_INFO)),
        ("SetFileSecurity", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PVOID,    # A pointer to SECURITY_INFORMATION value being
            PVOID,    # A pointer to SECURITY_DESCRIPTOR buffer
            ULONG,    # Length of Security descriptor buffer
            PDOKAN_FILE_INFO)),
        ("FindStreams", WINFUNCTYPE(NTSTATUS,
            LPCWSTR,  # FileName
            PVOID,    # call this function with PWIN32_FIND_STREAM_DATA
            PDOKAN_FILE_INFO))
    ]
# Mount a filesystem; blocks until the volume is unmounted.
DokanMain.restype = c_int
DokanMain.argtypes = (
    POINTER(DOKAN_OPTIONS),
    POINTER(DOKAN_OPERATIONS),
)

# Unmount the volume at the given mount point.
DokanRemoveMountPoint = windll.Dokan1.DokanRemoveMountPoint
DokanRemoveMountPoint.restype = BOOL
DokanRemoveMountPoint.argtypes = (
    LPCWSTR,
)

# Wildcard matching helper exported by the Dokan DLL.
DokanIsNameInExpression = windll.Dokan1.DokanIsNameInExpression
DokanIsNameInExpression.restype = BOOL
DokanIsNameInExpression.argtypes = (
    LPCWSTR,  # pattern
    LPCWSTR,  # name
    BOOL,     # ignore case
)

DokanDriverVersion = windll.Dokan1.DokanDriverVersion
DokanDriverVersion.restype = ULONG
DokanDriverVersion.argtypes = (
)

# Extend the timeout of the current operation.
DokanResetTimeout = windll.Dokan1.DokanResetTimeout
DokanResetTimeout.restype = BOOL
DokanResetTimeout.argtypes = (
    ULONG,             # timeout
    PDOKAN_FILE_INFO,  # file info pointer
)

# Win32 (advapi32) helper used to read security descriptors.
GetFileSecurity = windll.advapi32.GetFileSecurityW
GetFileSecurity.restype = BOOL
GetFileSecurity.argtypes = (
    LPWSTR,                # _In_ LPCTSTR lpFileName,
    SECURITY_INFORMATION,  # _In_ SECURITY_INFORMATION RequestedInformation,
    PSECURITY_DESCRIPTOR,  # _Out_opt_ PSECURITY_DESCRIPTOR pSecurityDescriptor,
    DWORD,                 # _In_ DWORD nLength,
    LPDWORD,               # _Out_ LPDWORD lpnLengthNeeded
)
| {
"content_hash": "c8b82d8036bb2117eff8c3443a6799b1",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 104,
"avg_line_length": 31.252669039145907,
"alnum_prop": 0.6246868594853109,
"repo_name": "PyFilesystem/pyfilesystem",
"id": "9d33be44838d7f9a848586bc97108b991e69b030",
"size": "8967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs/expose/dokan/libdokan.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1052910"
}
],
"symlink_target": ""
} |
"""Leetcode 1007. Minimum Domino Rotations For Equal Row
Medium
URL: https://leetcode.com/problems/minimum-domino-rotations-for-equal-row/
In a row of dominoes, A[i] and B[i] represent the top and bottom halves of the
i-th domino. (A domino is a tile with two numbers from 1 to 6 - one on each half
of the tile.)
We may rotate the i-th domino, so that A[i] and B[i] swap values.
Return the minimum number of rotations so that all the values in A are the same,
or all the values in B are the same.
If it cannot be done, return -1.
Example 1:
Input: A = [2,1,2,4,2,2],
B = [5,2,6,2,3,2]
Output: 2
Explanation:
The first figure represents the dominoes as given by A and B: before we do any
rotations.
If we rotate the second and fourth dominoes, we can make every value in the top
row equal to 2, as indicated by the second figure.
Example 2:
Input: A = [3,5,1,2,3],
B = [3,6,3,3,4]
Output: -1
Explanation:
In this case, it is not possible to rotate the dominoes to make one row of values equal.
Note:
- 1 <= A[i], B[i] <= 6
- 2 <= A.length == B.length <= 20000
"""
class SolutionNumCountsCover(object):
    """Count occurrences per face value, then look for a value that
    appears on every domino (top or bottom)."""

    def minDominoRotations(self, A, B):
        """Minimum rotations to make one row uniform, or -1.

        :type A: List[int]
        :type B: List[int]
        :rtype: int

        Time complexity: O(n).
        Space complexity: O(1) (two fixed-size count tables).
        """
        length = len(A)

        # Tally how often each face value 1..6 shows up in each row.
        top_counts = [0] * 7
        bottom_counts = [0] * 7
        for top, bottom in zip(A, B):
            top_counts[top] += 1
            bottom_counts[bottom] += 1

        for face in range(1, 7):
            # A row can be made uniform with `face` only if every
            # domino shows it on at least one half.
            if all(face in pair for pair in zip(A, B)):
                return min(length - top_counts[face],
                           length - bottom_counts[face])
        return -1
class SolutionNumCountsUnion(object):
    """Inclusion-exclusion variant: a face covers the row iff
    |top positions| + |bottom positions| - |both| equals n."""

    def minDominoRotations(self, A, B):
        """Minimum rotations to make one row uniform, or -1.

        :type A: List[int]
        :type B: List[int]
        :rtype: int

        Time complexity: O(n).
        Space complexity: O(1) (three fixed-size count tables).
        """
        length = len(A)

        # Per-face tallies for the top row, the bottom row, and the
        # dominoes showing the same face on both halves.
        top_counts = [0] * 7
        bottom_counts = [0] * 7
        both_counts = [0] * 7
        for top, bottom in zip(A, B):
            top_counts[top] += 1
            bottom_counts[bottom] += 1
            if top == bottom:
                both_counts[top] += 1

        for face in range(1, 7):
            covered = (top_counts[face] + bottom_counts[face]
                       - both_counts[face])
            if covered == length:
                return min(length - top_counts[face],
                           length - bottom_counts[face])
        return -1
def main():
    """Ad-hoc driver: run both solutions on three sample inputs and
    print the results (Python 2 print statements)."""
    # Output: 2
    A = [2,1,2,4,2,2]
    B = [5,2,6,2,3,2]
    print SolutionNumCountsCover().minDominoRotations(A, B)
    print SolutionNumCountsUnion().minDominoRotations(A, B)

    # Output: -1
    A = [3,5,1,2,3]
    B = [3,6,3,3,4]
    print SolutionNumCountsCover().minDominoRotations(A, B)
    print SolutionNumCountsUnion().minDominoRotations(A, B)

    # Output: 1
    A = [1,5,1,2,3]
    B = [3,3,3,3,4]
    print SolutionNumCountsCover().minDominoRotations(A, B)
    print SolutionNumCountsUnion().minDominoRotations(A, B)


if __name__ == '__main__':
    main()
| {
"content_hash": "0d317cc89a116d5651db17cd1e1745d2",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 88,
"avg_line_length": 27.859504132231404,
"alnum_prop": 0.5734203500444972,
"repo_name": "bowen0701/algorithms_data_structures",
"id": "f5d57eb1fa4c992d4e41b61aa96ade01a782f734",
"size": "3371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lc1007_minimum_domino_rotations_for_equal_row.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "108750"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from enum import Enum
from typing import (Any,
Dict,
Optional)
class EndpointState(Enum):
    """Connection lifecycle state reported for a single endpoint."""
    Disconnected = "disconnected"
    Connecting = "connecting"
    Connected = "connected"
    Disconnecting = "disconnecting"
class ClusterState(Enum):
    """Overall cluster health derived from the endpoint states."""
    Online = "online"
    Degraded = "degraded"
    Offline = "offline"
class ServiceType(Enum):
    """Couchbase service a given endpoint belongs to."""
    View = "views"
    KeyValue = "kv"
    Query = "query"
    Search = "search"
    Analytics = "analytics"
    Management = "mgmt"
class PingState(Enum):
    """Outcome of pinging a single endpoint."""
    OK = 'ok'
    TIMEOUT = 'timeout'
    ERROR = 'error'
class EndpointDiagnosticsReport:
    """Typed, read-only view over one endpoint entry of a diagnostics
    report; wraps the raw mapping produced by the SDK."""

    def __init__(self,
                 service_type,  # type: ServiceType
                 source  # type: Dict[str, Any]
                 ):
        self._src = source
        self._service_type = service_type

    @property
    def type(self) -> ServiceType:
        """**DEPRECATED** use ``service_type`` instead.

        Returns:
            ServiceType: Endpoint Service Type
        """
        return self._service_type

    @property
    def service_type(self) -> ServiceType:
        return self._service_type

    @property
    def id(self) -> str:
        return self._src.get('id')

    @property
    def local(self) -> str:
        return self._src.get('local')

    @property
    def remote(self) -> str:
        return self._src.get('remote')

    @property
    def namespace(self) -> str:
        # older payloads named this field 'scope'
        return self._src.get('namespace')

    @property
    def last_activity(self) -> timedelta:
        """**DEPRECATED** use ``last_activity_us`` instead.

        Returns:
            timedelta: time since the endpoint was last active
        """
        return self.last_activity_us

    @property
    def last_activity_us(self) -> timedelta:
        raw_us = self._src.get('last_activity_us')
        return timedelta(microseconds=raw_us)

    @property
    def state(self) -> EndpointState:
        return EndpointState(self._src.get('state'))

    def as_dict(self) -> dict:
        """Return the raw underlying mapping."""
        return self._src
class EndpointPingReport:
    """Typed, read-only view over one endpoint entry of a ping report;
    wraps the raw mapping produced by the SDK."""

    def __init__(self,
                 service_type,  # type: ServiceType
                 source  # type: Dict[str, Any]
                 ):
        self._src_ping = source
        self._service_type = service_type

    @property
    def service_type(self) -> ServiceType:
        return self._service_type

    @property
    def id(self) -> str:
        return self._src_ping.get('id')

    @property
    def local(self) -> str:
        return self._src_ping.get('local')

    @property
    def remote(self) -> str:
        return self._src_ping.get('remote')

    @property
    def namespace(self) -> Optional[str]:
        # older payloads named this field 'scope'; prefer 'namespace'
        legacy = self._src_ping.get('scope')
        return self._src_ping.get('namespace', legacy)

    @property
    def error(self) -> Optional[str]:
        return self._src_ping.get('error')

    @property
    def latency(self) -> timedelta:
        raw_us = self._src_ping.get('latency_us')
        return timedelta(microseconds=raw_us)

    @property
    def state(self) -> PingState:
        return PingState(self._src_ping.get('state'))

    def as_dict(self) -> Dict[str, Any]:
        """Return the raw underlying mapping."""
        return self._src_ping

    def __repr__(self):
        return "EndpointPingReport:{}".format(self._src_ping)
| {
"content_hash": "b6ccbf2a2d60c810515919da14b79a9f",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 24.075342465753426,
"alnum_prop": 0.5812233285917496,
"repo_name": "couchbase/couchbase-python-client",
"id": "7a9dac873326aff41b7082773553a235e9940e1d",
"size": "4134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchbase/diagnostics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "779634"
},
{
"name": "CMake",
"bytes": "5320"
},
{
"name": "Python",
"bytes": "2787486"
}
],
"symlink_target": ""
} |
"""Command line manager for opsdroid."""
import click
from opsdroid.cli.config import config
from opsdroid.cli.logs import logs
from opsdroid.cli.start import start
from opsdroid.cli.version import version
@click.group()
@click.pass_context
def cli(ctx):
    """Opsdroid is a chat bot framework written in Python.

    It is designed to be extendable, scalable and simple.
    See https://opsdroid.github.io/ for more information.
    """


# Register each subcommand on the top-level group.
cli.add_command(config)
cli.add_command(logs)
cli.add_command(start)
cli.add_command(version)
| {
"content_hash": "6fb055b631ceaac26aee752bdc79293f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 22.458333333333332,
"alnum_prop": 0.7495361781076066,
"repo_name": "opsdroid/opsdroid",
"id": "b4ddcd5869aff871f7a249dbf38cab40df928cc9",
"size": "539",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opsdroid/cli/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1755"
},
{
"name": "Jinja",
"bytes": "2320"
},
{
"name": "Jupyter Notebook",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1178414"
}
],
"symlink_target": ""
} |
def db_inspect(db):
    """Print a one-line summary of the database: each key, with its
    record id in parentheses when the record has one."""
    labels = [
        "{}({})".format(key, record["id"]) if "id" in record else str(key)
        for key, record in db.items()
    ]
    print("DB state: {}".format(", ".join(labels)))
# You don't have to worry about this
class IdGen:
    """Hands out sequential integer ids, starting at ``starting_id``."""

    def __init__(self, starting_id):
        # Store the predecessor so the first next() yields starting_id.
        self.current_id = starting_id - 1

    def next(self):
        """Return the next id in the sequence."""
        self.current_id = self.current_id + 1
        return self.current_id
# -------------------
# ---
# How to use this to get ids:
# `ids.next()` will return an incrementing ID
# ---
ids = IdGen(1)
# This is sample data (DON'T EDIT THIS!)
sample_data = [
    ("Foo, Inc.", "foo"),
    ("Bar, Inc.", "bar"),
    ("Baz, Inc.", "baz"),
    ("Zoo, Inc.", "zoo"),
    ("Legit Co, Inc.", "lco"),
    ("Not Legit Co.", "bar"),
]
# This is your in-memory database:
db = {}
db_inspect(db)
print("* READ: bar")
# --- start: YOUR READ CODE ---
# dict.get() prints the record, or None when the key is absent -- the same
# output as the if/else version, without the redundant `in db.keys()` scan.
print(db.get('bar'))
# --- end: YOUR READ CODE ---
print()
# Create 2 customers
for name, short_name in sample_data[:2]:
    print("* CREATE: " + short_name)
    # --- start: YOUR CREATE CODE ---
    db[short_name] = {"id": ids.next(), "name": name, "short_name": short_name}
    # --- end: YOUR CREATE CODE ---
db_inspect(db)
print()
print("* READ: bar")
# --- start: YOUR READ CODE ---
print(db.get('bar'))
# --- end: YOUR READ CODE ---
print()
print("* READ: lco")
# --- start: YOUR READ CODE ---
print(db.get('lco'))
# --- end: YOUR READ CODE ---
print()
print("* DELETE: lco")
# --- start: YOUR DELETE CODE ---
# Membership test on the dict itself; `in db.keys()` builds the same check
# the long way round. `del` is a statement, not a function call.
if 'lco' in db:
    del db['lco']
else:
    print('"lco" is not in specified database.')
# --- end: YOUR DELETE CODE ---
db_inspect(db)
print()
print("* DELETE: foo")
# --- start: YOUR DELETE CODE ---
if 'foo' in db:
    del db['foo']
    print('"foo" has now been deleted from database.')
else:
    print(None)
# --- end: YOUR DELETE CODE ---
db_inspect(db)
print()
# Create all customers (note: "Not Legit Co." overwrites the earlier "bar")
for name, short_name in sample_data:
    print("* CREATE: " + short_name)
    # --- start: YOUR CREATE CODE ---
    db[short_name] = {"id": ids.next(), "name": name, "short_name": short_name}
    # --- end: YOUR CREATE CODE ---
db_inspect(db)
| {
"content_hash": "ba40b534fefb71d9c49b96ea02aefb9d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 19.491666666666667,
"alnum_prop": 0.5391192817443352,
"repo_name": "thoughtarray/IntroToProgramming",
"id": "40336a3b26922a246c2deeba6cdfbd1381ebbf61",
"size": "2376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chapter 6 Dictionaries/hw/vv/crm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4171"
},
{
"name": "Python",
"bytes": "60612"
}
],
"symlink_target": ""
} |
"""Custom script to run PyLint on apitools codebase.
"Inspired" by the similar script in gcloud-python.
This runs pylint as a script via subprocess in two different
subprocesses. The first lints the production/library code
using the default rc file (PRODUCTION_RC). The second lints the
demo/test code using an rc file (TEST_RC) which allows more style
violations (hence it has a reduced number of style checks).
"""
import ConfigParser
import copy
import os
import subprocess
import sys
# Relative directory prefixes that are never linted (generated test data,
# vendored sample code, virtualenvs).
IGNORED_DIRECTORIES = [
    'apitools/gen/testdata',
    'samples/storage_sample/storage',
    'venv',
]
# Individual files exempt from linting.
IGNORED_FILES = [
    'ez_setup.py',
    'run_pylint.py',
    'setup.py',
]
# Pylint rc file used for production/library code.
PRODUCTION_RC = 'default.pylintrc'
# Generated rc file (PRODUCTION_RC plus TEST_RC_ADDITIONS) used for
# test/demo code; written by make_test_rc().
TEST_RC = 'reduced.pylintrc'
# Messages additionally disabled when linting test/demo code.
TEST_DISABLED_MESSAGES = [
    'exec-used',
    'invalid-name',
    'missing-docstring',
    'protected-access',
]
# Options merged into the base rc to produce TEST_RC.
TEST_RC_ADDITIONS = {
    'MESSAGES CONTROL': {
        'disable': ',\n'.join(TEST_DISABLED_MESSAGES),
    },
}
def read_config(filename):
    """Loads a pylintrc file into a native ConfigParser object."""
    parsed = ConfigParser.ConfigParser()
    with open(filename, 'r') as rc_file:
        parsed.readfp(rc_file)
    return parsed
def make_test_rc(base_rc_filename, additions_dict, target_filename):
    """Combines a base rc and test additions into single file."""
    main_cfg = read_config(base_rc_filename)
    # Create fresh config for test, which must extend production.
    test_cfg = ConfigParser.ConfigParser()
    # NOTE(review): copies ConfigParser's private `_sections` dict wholesale
    # and later calls the private `_dict()` factory -- this relies on
    # ConfigParser internals staying stable.
    test_cfg._sections = copy.deepcopy(main_cfg._sections)
    for section, opts in additions_dict.items():
        curr_section = test_cfg._sections.setdefault(
            section, test_cfg._dict())
        for opt, opt_val in opts.items():
            curr_val = curr_section.get(opt)
            if curr_val is None:
                # Additions may only extend options already present in the
                # base rc; a missing option indicates a typo in the additions.
                raise KeyError('Expected to be adding to existing option.')
            # Append the additions below the existing value.
            curr_section[opt] = '%s\n%s' % (curr_val, opt_val)
    with open(target_filename, 'w') as file_obj:
        test_cfg.write(file_obj)
def valid_filename(filename):
    """Checks if a file is a Python file and is not ignored."""
    if any(filename.startswith(directory)
           for directory in IGNORED_DIRECTORIES):
        return False
    if not filename.endswith('.py'):
        return False
    return filename not in IGNORED_FILES
def is_production_filename(filename):
    """Checks if the file contains production code.

    :rtype: boolean
    :returns: Boolean indicating production status.
    """
    # A file is non-production if it mentions demos/tests anywhere in its
    # path, or lives under the regression tree.
    if 'demo' in filename or 'test' in filename:
        return False
    return not filename.startswith('regression')
def get_files_for_linting(allow_limited=True, diff_base=None):
    """Gets a list of files in the repository.
    By default, returns all files via ``git ls-files``. However, in some cases
    uses a specific commit or branch (a so-called diff base) to compare
    against for changed files. (This requires ``allow_limited=True``.)
    To speed up linting on Travis pull requests against main, we manually
    set the diff base to origin/main. We don't do this on non-pull requests
    since origin/main will be equivalent to the currently checked out code.
    One could potentially use ${TRAVIS_COMMIT_RANGE} to find a diff base but
    this value is not dependable.
    :type allow_limited: boolean
    :param allow_limited: Boolean indicating if a reduced set of files can
                          be used.
    :rtype: pair
    :returns: Tuple of the list of filenames to be linted and the diff base
              used (None when the full file list was taken).
    """
    if os.getenv('TRAVIS') == 'true':
        # In travis, don't default to main.
        diff_base = None
        if (os.getenv('TRAVIS_BRANCH') == 'main' and
                os.getenv('TRAVIS_PULL_REQUEST') != 'false'):
            # In the case of a pull request into main, we want to
            # diff against HEAD in main.
            diff_base = 'origin/main'
    if diff_base is not None and allow_limited:
        # Restricted set: only the files changed relative to the diff base.
        result = subprocess.check_output(['git', 'diff', '--name-only',
                                          diff_base])
        print 'Using files changed relative to %s:' % (diff_base,)
        print '-' * 60
        print result.rstrip('\n')  # Don't print trailing newlines.
        print '-' * 60
    else:
        # Full set: every file tracked by git.
        print 'Diff base not specified, listing all files in repository.'
        result = subprocess.check_output(['git', 'ls-files'])
    return result.rstrip('\n').split('\n'), diff_base
def get_python_files(all_files=None, diff_base=None):
    """Gets a list of all Python files in the repository that need linting.

    Relies on :func:`get_files_for_linting()` to determine which files should
    be considered.

    NOTE: This requires ``git`` to be installed and requires that this
          is run within the ``git`` repository.

    :type all_files: list or ``NoneType``
    :param all_files: Optional list of files to be linted.

    :rtype: tuple
    :returns: A tuple containing two lists and a boolean. The first list
              contains all production files, the next all test/demo files and
              the boolean indicates if a restricted fileset was used.
    """
    using_restricted = False
    if all_files is None:
        all_files, diff_base = get_files_for_linting(diff_base=diff_base)
        using_restricted = diff_base is not None
    library_files = []
    non_library_files = []
    for filename in all_files:
        if not valid_filename(filename):
            continue
        # Route each lintable file into the production or test/demo bucket.
        bucket = (library_files if is_production_filename(filename)
                  else non_library_files)
        bucket.append(filename)
    return library_files, non_library_files, using_restricted
def lint_fileset(filenames, rcfile, description):
    """Lints a group of files using a given rcfile."""
    # Only lint filenames that exist. For example, 'git diff --name-only'
    # could spit out deleted / renamed files. Another alternative could
    # be to use 'git diff --name-status' and filter out files with a
    # status of 'D'.
    filenames = [filename for filename in filenames
                 if os.path.exists(filename)]
    if filenames:
        rc_flag = '--rcfile=%s' % (rcfile,)
        pylint_shell_command = ['pylint', rc_flag] + filenames
        status_code = subprocess.call(pylint_shell_command)
        if status_code != 0:
            error_message = ('Pylint failed on %s with '
                             'status %d.' % (description, status_code))
            # Python 2 print-to-stream syntax; propagate pylint's status code
            # via SystemExit so main() can catch and retry on the full set.
            print >> sys.stderr, error_message
            sys.exit(status_code)
    else:
        print 'Skipping %s, no files to lint.' % (description,)
def main(argv):
    """Script entry point. Lints both sets of files."""
    # Optional single argument: a git diff base (commit or branch).
    diff_base = argv[1] if len(argv) > 1 else None
    # Regenerate the reduced test rc from the production rc before linting.
    make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, TEST_RC)
    library_files, non_library_files, using_restricted = get_python_files(
        diff_base=diff_base)
    try:
        lint_fileset(library_files, PRODUCTION_RC, 'library code')
        lint_fileset(non_library_files, TEST_RC, 'test and demo code')
    except SystemExit:
        # lint_fileset() exits non-zero on lint failure. If we only linted a
        # restricted (changed-files) set, retry against the full repository
        # before letting the failure propagate.
        if not using_restricted:
            raise
        message = 'Restricted lint failed, expanding to full fileset.'
        print >> sys.stderr, message
        all_files, _ = get_files_for_linting(allow_limited=False)
        library_files, non_library_files, _ = get_python_files(
            all_files=all_files)
        lint_fileset(library_files, PRODUCTION_RC, 'library code')
        lint_fileset(non_library_files, TEST_RC, 'test and demo code')
if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "8f7a6aca69e7089b4216970c3e216d4d",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 78,
"avg_line_length": 35.25116279069768,
"alnum_prop": 0.6417733210186041,
"repo_name": "KaranToor/MA450",
"id": "3103aa109dd77f2914c9fc724d3c96045472e0c7",
"size": "8157",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/platform/gsutil/third_party/apitools/run_pylint.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
import copy
import os
import re
import shlex
from ..outproc import base as outproc
from ..local import command
from ..local import statusfile
from ..local import utils
from ..local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
from ..local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
from ..local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
# Matches "// Flags: ..." annotations in test sources; the captured flags are
# added to the test's command line (see TestCase._parse_source_flags).
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
# Patterns for additional resource files on Android. Files that are not covered
# by one of the other patterns below will be specified in the resources section.
RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
# Pattern to auto-detect files to push on Android for statements like:
# load("path/to/file.js")
LOAD_PATTERN = re.compile(
    r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
# Pattern to auto-detect files to push on Android for statements like:
# import foobar from "path/to/file.js"
# import {foo, bar} from "path/to/file.js"
# export {"foo" as "bar"} from "path/to/file.js"
MODULE_FROM_RESOURCES_PATTERN = re.compile(
    r"(?:import|export).*?from\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect files to push on Android for statements like:
# import "path/to/file.js"
# import("module.mjs").catch()...
MODULE_IMPORT_RESOURCES_PATTERN = re.compile(
    r"import\s*\(?['\"]([^'\"]+)['\"]",
    re.MULTILINE | re.DOTALL)
# Pattern to detect and strip test262 frontmatter from tests to prevent false
# positives for MODULE_RESOURCES_PATTERN above.
TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)
# Marker returned by _get_timeout_param() for tests that get a 10x timeout.
TIMEOUT_LONG = "long"
try:
  cmp  # Python 2: the builtin exists.
except NameError:
  # Python 3 removed the builtin; recreate the classic three-way compare.
  def cmp(x, y):
    if x < y:
      return -1
    if x > y:
      return 1
    return 0
class TestCase(object):
  """A single test together with its expected outcomes and shell command.

  Instances are created by a TestSuite and refined by the test processors
  through create_subtest(). Concrete suites subclass this and override the
  hooks (get_shell, _get_source_path, and the various _get_*_flags/params
  helpers) that describe how the test is executed.
  """

  def __init__(self, suite, path, name, test_config):
    self.suite = suite        # TestSuite object
    self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
    self.name = name          # string that identifies test in the status file
    self.variant = None       # name of the used testing variant
    self.variant_flags = []   # list of strings, flags specific to this test

    # Fields used by the test processors.
    self.origin = None        # Test that this test is subtest of.
    self.processor = None     # Processor that created this subtest.
    self.procid = '%s/%s' % (self.suite.name, self.name)  # unique id
    self.keep_output = False  # Can output of this test be dropped

    # Test config contains information needed to build the command.
    self._test_config = test_config
    self._random_seed = None  # Overrides test config value if not None

    # Outcomes
    self._statusfile_outcomes = None
    self._expected_outcomes = None
    self._checked_flag_contradictions = False
    self._statusfile_flags = None
    self.expected_failure_reason = None

    self._prepare_outcomes()

  def create_subtest(self, processor, subtest_id, variant=None, flags=None,
                     keep_output=False, random_seed=None):
    """Returns a shallow copy of this test customized by `processor`."""
    subtest = copy.copy(self)
    subtest.origin = self
    subtest.processor = processor
    subtest.procid += '.%s' % subtest_id
    subtest.keep_output |= keep_output
    if random_seed:
      subtest._random_seed = random_seed
    if flags:
      subtest.variant_flags = subtest.variant_flags + flags
    if variant is not None:
      # A variant may only be assigned once per test.
      assert self.variant is None
      subtest.variant = variant
      subtest._prepare_outcomes()
    return subtest

  def _prepare_outcomes(self, force_update=True):
    """Splits status-file outcomes into runtime flags and expectations."""
    if force_update or self._statusfile_outcomes is None:
      def is_flag(outcome):
        return outcome.startswith('--')
      def not_flag(outcome):
        return not is_flag(outcome)

      outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
      # Bug fix: materialize the filter results as lists. On Python 3,
      # `filter` returns a one-shot iterator, so the repeated membership
      # tests in do_skip/is_slow/is_fail/etc. and the list concatenation in
      # the expected_outcomes property would see an exhausted iterator after
      # the first use and silently misbehave.
      self._statusfile_outcomes = list(filter(not_flag, outcomes))
      self._statusfile_flags = list(filter(is_flag, outcomes))
    self._expected_outcomes = (
      self._parse_status_file_outcomes(self._statusfile_outcomes))

  def _parse_status_file_outcomes(self, outcomes):
    """Maps raw status-file outcomes to an outproc outcome list."""
    if (statusfile.FAIL_SLOPPY in outcomes and
        '--use-strict' not in self.variant_flags):
      return outproc.OUTCOMES_FAIL

    expected_outcomes = []
    if (statusfile.FAIL in outcomes or
        statusfile.FAIL_OK in outcomes):
      expected_outcomes.append(statusfile.FAIL)
    if statusfile.CRASH in outcomes:
      expected_outcomes.append(statusfile.CRASH)

    # Do not add PASS if there is nothing else. Empty outcomes are converted
    # to the global [PASS].
    if expected_outcomes and statusfile.PASS in outcomes:
      expected_outcomes.append(statusfile.PASS)

    # Avoid creating multiple instances of a list with a single FAIL.
    if expected_outcomes == outproc.OUTCOMES_FAIL:
      return outproc.OUTCOMES_FAIL
    return expected_outcomes or outproc.OUTCOMES_PASS

  def allow_timeouts(self):
    """Widens the expected outcomes to also accept TIMEOUT."""
    if self.expected_outcomes == outproc.OUTCOMES_PASS:
      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
    elif statusfile.TIMEOUT not in self.expected_outcomes:
      self._expected_outcomes = (
          self.expected_outcomes + [statusfile.TIMEOUT])

  @property
  def expected_outcomes(self):
    """Outcomes expected for this test; checks flag contradictions once."""
    def normalize_flag(flag):
      return flag.replace("_", "-").replace("--no-", "--no")

    def has_flag(conflicting_flag, flags):
      conflicting_flag = normalize_flag(conflicting_flag)
      if conflicting_flag in flags:
        return True
      # Trailing '*' in a rule matches any flag with that prefix.
      if conflicting_flag.endswith("*"):
        return any(flag.startswith(conflicting_flag[:-1]) for flag in flags)
      return False

    def check_flags(incompatible_flags, actual_flags, rule):
      # A matched rule means V8 itself is expected to abort with a flag
      # contradiction error, so the test must FAIL.
      for incompatible_flag in incompatible_flags:
        if has_flag(incompatible_flag, actual_flags):
          self._statusfile_outcomes = outproc.OUTCOMES_FAIL
          self._expected_outcomes = outproc.OUTCOMES_FAIL
          self.expected_failure_reason = ("Rule " + rule + " in " +
              "tools/testrunner/local/variants.py expected a flag " +
              "contradiction error with " + incompatible_flag + ".")

    if not self._checked_flag_contradictions:
      self._checked_flag_contradictions = True
      file_specific_flags = (self._get_source_flags() + self._get_suite_flags()
                             + self._get_statusfile_flags())
      file_specific_flags = [normalize_flag(flag)
                             for flag in file_specific_flags]
      extra_flags = [normalize_flag(flag) for flag in self._get_extra_flags()]
      if self.variant in INCOMPATIBLE_FLAGS_PER_VARIANT:
        check_flags(INCOMPATIBLE_FLAGS_PER_VARIANT[self.variant],
                    file_specific_flags,
                    "INCOMPATIBLE_FLAGS_PER_VARIANT[\""+self.variant+"\"]")
      for variable, incompatible_flags in (
          INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items()):
        if self.suite.statusfile.variables[variable]:
          check_flags(incompatible_flags, file_specific_flags,
                      "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\""+variable+"\"]")
      for extra_flag, incompatible_flags in (
          INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items()):
        if has_flag(extra_flag, extra_flags):
          check_flags(incompatible_flags, file_specific_flags,
                      "INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG[\""+extra_flag+"\"]")
    return self._expected_outcomes

  @property
  def do_skip(self):
    return (statusfile.SKIP in self._statusfile_outcomes and
            not self.suite.test_config.run_skipped)

  @property
  def is_slow(self):
    return statusfile.SLOW in self._statusfile_outcomes

  @property
  def is_fail_ok(self):
    return statusfile.FAIL_OK in self._statusfile_outcomes

  @property
  def is_pass_or_fail(self):
    return (statusfile.PASS in self._statusfile_outcomes and
            statusfile.FAIL in self._statusfile_outcomes and
            statusfile.CRASH not in self._statusfile_outcomes)

  @property
  def is_fail(self):
    return (statusfile.FAIL in self._statusfile_outcomes and
            statusfile.PASS not in self._statusfile_outcomes)

  @property
  def only_standard_variant(self):
    return statusfile.NO_VARIANTS in self._statusfile_outcomes

  def get_command(self):
    """Builds the complete Command object used to execute this test."""
    params = self._get_cmd_params()
    env = self._get_cmd_env()
    shell = self.get_shell()
    if utils.IsWindows():
      shell += '.exe'
    shell_flags = self._get_shell_flags()
    timeout = self._get_timeout(params)
    return self._create_cmd(shell, shell_flags + params, env, timeout)

  def _get_cmd_params(self):
    """Gets command parameters and combines them in the following order:
      - files [empty by default]
      - random seed
      - mode flags (based on chosen mode)
      - extra flags (from command line)
      - user flags (variant/fuzzer flags)
      - source flags (from source code) [empty by default]
      - test-suite flags
      - statusfile flags

    The best way to modify how parameters are created is to only override
    methods for getting partial parameters.
    """
    return (
        self._get_files_params() +
        self._get_random_seed_flags() +
        self._get_mode_flags() +
        self._get_extra_flags() +
        self._get_variant_flags() +
        self._get_source_flags() +
        self._get_suite_flags() +
        self._get_statusfile_flags()
    )

  def _get_cmd_env(self):
    return {}

  def _get_files_params(self):
    return []

  def _get_timeout_param(self):
    return None

  def _get_random_seed_flags(self):
    return ['--random-seed=%d' % self.random_seed]

  @property
  def random_seed(self):
    # A processor-supplied seed overrides the test-config default.
    return self._random_seed or self._test_config.random_seed

  def _get_extra_flags(self):
    return self._test_config.extra_flags

  def _get_variant_flags(self):
    return self.variant_flags

  def _get_statusfile_flags(self):
    """Gets runtime flags from a status file.

    Every outcome that starts with "--" is a flag.
    """
    return self._statusfile_flags

  def _get_mode_flags(self):
    return self._test_config.mode_flags

  def _get_source_flags(self):
    return []

  def _get_suite_flags(self):
    return []

  def _get_shell_flags(self):
    return []

  def _get_timeout(self, params):
    # Scale the base timeout for configurations known to run slower.
    timeout = self._test_config.timeout
    if "--stress-opt" in params:
      timeout *= 4
    if "--jitless" in params:
      timeout *= 2
    if "--no-opt" in params:
      timeout *= 2
    if "--noenable-vfp3" in params:
      timeout *= 2
    if self._get_timeout_param() == TIMEOUT_LONG:
      timeout *= 10
    if self.is_slow:
      timeout *= 4
    return timeout

  def get_shell(self):
    raise NotImplementedError()

  def _get_suffix(self):
    return '.js'

  def _create_cmd(self, shell, params, env, timeout):
    return command.Command(
      cmd_prefix=self._test_config.command_prefix,
      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
      args=params,
      env=env,
      timeout=timeout,
      verbose=self._test_config.verbose,
      resources_func=self._get_resources,
      handle_sigterm=True,
    )

  def _parse_source_flags(self, source=None):
    """Extracts "// Flags: ..." annotations from the test source."""
    source = source or self.get_source()
    flags = []
    for match in re.findall(FLAGS_PATTERN, source):
      flags += shlex.split(match.strip())
    return flags

  def is_source_available(self):
    return self._get_source_path() is not None

  def get_source(self):
    with open(self._get_source_path()) as f:
      return f.read()

  def _get_source_path(self):
    return None

  def _get_resources(self):
    """Returns a list of absolute paths with additional files needed by the
    test case.

    Used to push additional files to Android devices.
    """
    return []

  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    return True

  @property
  def output_proc(self):
    if self.expected_outcomes is outproc.OUTCOMES_PASS:
      return outproc.DEFAULT
    return outproc.OutProc(self.expected_outcomes)

  def __cmp__(self, other):
    # Make sure that test cases are sorted correctly if sorted without
    # key function. But using a key function is preferred for speed.
    return cmp(
        (self.suite.name, self.name, self.variant),
        (other.suite.name, other.name, other.variant)
    )

  def __str__(self):
    return self.suite.name + '/' + self.name
class D8TestCase(TestCase):
  """TestCase whose tests are executed by V8's d8 developer shell."""
  def get_shell(self):
    return "d8"
  def _get_shell_flags(self):
    return ['--test']
  def _get_resources_for_file(self, file):
    """Returns for a given file a list of absolute paths of files needed by the
    given file.
    """
    with open(file) as f:
      source = f.read()
    result = []
    def add_path(path):
      result.append(os.path.abspath(path.replace('/', os.path.sep)))
    def add_import_path(import_path):
      # Imported paths are resolved relative to the importing file.
      add_path(os.path.normpath(
        os.path.join(os.path.dirname(file), import_path)))
    def strip_test262_frontmatter(input):
      return TEST262_FRONTMATTER_PATTERN.sub('', input)
    for match in RESOURCES_PATTERN.finditer(source):
      # There are several resources per line. Relative to base dir.
      for path in match.group(1).strip().split():
        add_path(path)
    # Strip test262 frontmatter before looking for load() and import/export
    # statements.
    source = strip_test262_frontmatter(source)
    for match in LOAD_PATTERN.finditer(source):
      # Files in load statements are relative to base dir.
      add_path(match.group(1))
    # Imported files are relative to the file importing them.
    for match in MODULE_FROM_RESOURCES_PATTERN.finditer(source):
      add_import_path(match.group(1))
    for match in MODULE_IMPORT_RESOURCES_PATTERN.finditer(source):
      add_import_path(match.group(1))
    return result
  def _get_resources(self):
    """Returns the list of files needed by a test case."""
    if not self._get_source_path():
      return []
    result = set()
    to_check = [self._get_source_path()]
    # Recurse over all files until reaching a fixpoint.
    while to_check:
      next_resource = to_check.pop()
      result.add(next_resource)
      for resource in self._get_resources_for_file(next_resource):
        # Only add files that exist on disc. The patterns we check for give
        # some false positives otherwise.
        if resource not in result and os.path.exists(resource):
          to_check.append(resource)
    return sorted(list(result))
  def skip_predictable(self):
    """Returns True if the test case is not suitable for predictable testing."""
    return (statusfile.FAIL in self.expected_outcomes or
            self.output_proc.negative)
| {
"content_hash": "59300df1906fae8f257aaa6377662020",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 88,
"avg_line_length": 34.31542056074766,
"alnum_prop": 0.662558725403418,
"repo_name": "youtube/cobalt_sandbox",
"id": "e037f996797256eecb8bd91a304d5a3292069d4a",
"size": "16259",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/v8/tools/testrunner/objects/testcase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import (
member_form,
member_success,
)
# Route the app root to the member form, and /success/ to the
# post-submission page (reversible by the name "member-success").
urlpatterns = [
    url(r'^$', member_form),
    url(r'^success/$', member_success, name="member-success")
]
| {
"content_hash": "daf21853637525932cafb17ec25412c0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 17.083333333333332,
"alnum_prop": 0.6390243902439025,
"repo_name": "deadbeatfour/reg-form",
"id": "668261dc187ecbc2697055a72cfc779840b1cc8a",
"size": "205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3967"
},
{
"name": "HTML",
"bytes": "3375"
},
{
"name": "Python",
"bytes": "8261"
}
],
"symlink_target": ""
} |
from pykit import awssign
from pykit import http
from pykit import httpmultipart
def send_post_request(host, port, headers, fields):
    """POST multipart form `fields` to host:port and return the response.

    Returns a dict with 'status_code', 'headers' and 'body' (the whole
    response body joined into one string).
    """
    client = http.Client(host, port)
    multipart = httpmultipart.Multipart()
    request_headers = multipart.make_headers(fields, headers)
    body_reader = multipart.make_body_reader(fields)
    client.send_request('/', method='POST', headers=request_headers)
    for chunk in body_reader:
        client.send_body(chunk)
    status, response_headers = client.read_response()
    # Drain the response body in 1 MB pieces until EOF.
    received = []
    while True:
        piece = client.read_body(1024 * 1024)
        if piece == '':
            break
        received.append(piece)
    return {
        'status_code': status,
        'headers': response_headers,
        'body': ''.join(received),
    }
if __name__ == '__main__':
    # The following values are placeholders; fill in real ones before running.
    bucket_name = 'your bucket name'
    key_name = 'key name to upload'
    endpoint = 's2 endpoint domain name'
    # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
    # Host must be in the format of destinationBucket.endpoint
    # you should add it in /etc/hosts
    host = bucket_name + '.' + endpoint
    port = 80
    access_key = 'access key'
    secret_key = 'secret key'
    signer = awssign.Signer(access_key, secret_key)
    fields = {
        'key': key_name,  # key name
        'Policy': {
            'expiration': '2018-09-30T00:00:00.000Z',
            'conditions': [
                ['starts-with', '$key', ''],
                {
                    'bucket': bucket_name,  # bucket name
                },
            ],
        },
    }
    headers = {
        'Host': host,
    }
    # Signs the POST policy in place (adds the credential/signature fields).
    signer.add_post_auth(fields, request_date='20180911T120101Z')
    fields_to_sent = []
    # NOTE: dict.iteritems() and the print statement below -- this example
    # targets Python 2.
    for k, v in fields.iteritems():
        fields_to_sent.append({'name': k, 'value': v})
    # file must be the last field
    # content can also be a opened file
    content = 'this is test of awssign.add_post_auth'
    fields_to_sent.append({
        'name': 'file',
        'value': [content, len(content), 'file name'],
    })
    print send_post_request(host, port, headers, fields_to_sent)
| {
"content_hash": "3eb2a68118631d7fd562388b6f69f385",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 73,
"avg_line_length": 25.85185185185185,
"alnum_prop": 0.5711556829035339,
"repo_name": "sejust/pykit",
"id": "5997e50989efb92b5da0d71973aaddcfc1895988",
"size": "2136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "awssign/example/post_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1193736"
},
{
"name": "Shell",
"bytes": "45490"
}
],
"symlink_target": ""
} |
from pandas import concat
from .read_gmt import read_gmt
def read_gmts(gmt_file_paths, sets=None, drop_description=True, collapse=False):
    """Read multiple .gmt files and concatenate them into one DataFrame.

    Arguments:
        gmt_file_paths (iterable): paths of .gmt files to read.
        sets (iterable): optional gene-set names to keep; other rows are
            dropped, along with columns that become all-NaN.
        drop_description (bool): passed through to read_gmt.
        collapse (bool): if True, return only the unique gene names (sorted)
            across the selected sets instead of the DataFrame.

    Returns:
        DataFrame (gene sets x genes), or a 1-D array of unique gene names
        when collapse=True.
    """
    dfs = [
        read_gmt(gmt_file_path, drop_description=drop_description)
        for gmt_file_path in gmt_file_paths
    ]
    df = concat(dfs, sort=True)
    if sets is not None:
        # Use Index.intersection explicitly: `df.index & sets` relied on the
        # deprecated set-operator behavior of Index.__and__, which changes
        # meaning (elementwise logical and) in newer pandas.
        df = df.loc[df.index.intersection(sets)].dropna(axis=1, how="all")
    if collapse:
        return df.unstack().dropna().sort_values().unique()
    return df
| {
"content_hash": "5ca9dc29fe853df72790559564868fce",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 19.653846153846153,
"alnum_prop": 0.6360078277886497,
"repo_name": "UCSD-CCAL/ccal",
"id": "7199b5e98027aa356bbbcfa4999ac770ec91c213",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccal/read_gmts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "20830"
},
{
"name": "Python",
"bytes": "294577"
}
],
"symlink_target": ""
} |
import numpy
import os
class CachedDatasets:
    """A convenience class to access cached time series datasets.

    Note, that these *cached datasets* are statically included into *tslearn*
    and are distinct from the ones in :class:`UCR_UEA_datasets`.

    When using the Trace dataset, please cite [1]_.

    See Also
    --------
    UCR_UEA_datasets : Provides more datasets and supports caching.

    References
    ----------
    .. [1] A. Bagnall, J. Lines, W. Vickers and E. Keogh, The UEA & UCR Time
       Series Classification Repository, www.timeseriesclassification.com
    """
    def __init__(self):
        # Cached .npz files ship inside the package, one level up from this
        # module.
        self.path = os.path.join(os.path.dirname(__file__),
                                 "..",
                                 ".cached_datasets")

    def list_datasets(self):
        """List cached datasets.

        Examples
        --------
        >>> data_loader = CachedDatasets()
        >>> "Trace" in data_loader.list_datasets()
        True

        Returns
        -------
        list of str:
            A list of names of all cached (univariate and multivariate)
            dataset names.
        """
        # Every cached dataset is stored as a single <name>.npz archive.
        return [fname[:fname.rfind(".")]
                for fname in os.listdir(self.path)
                if fname.endswith(".npz")]

    def load_dataset(self, dataset_name):
        """Load a cached dataset from its name.

        Parameters
        ----------
        dataset_name : str
            Name of the dataset. Should be in the list returned by
            :meth:`~list_datasets`.

        Returns
        -------
        numpy.ndarray of shape (n_ts_train, sz, d) or None
            Training time series. None if unsuccessful.
        numpy.ndarray of integers with shape (n_ts_train, ) or None
            Training labels. None if unsuccessful.
        numpy.ndarray of shape (n_ts_test, sz, d) or None
            Test time series. None if unsuccessful.
        numpy.ndarray of integers with shape (n_ts_test, ) or None
            Test labels. None if unsuccessful.

        Examples
        --------
        >>> data_loader = CachedDatasets()
        >>> X_train, y_train, X_test, y_test = data_loader.load_dataset(
        ...     "Trace")
        >>> print(X_train.shape)
        (100, 275, 1)
        >>> print(y_train.shape)
        (100,)

        Raises
        ------
        IOError
            If the dataset does not exist or cannot be read.
        """
        # NpzFile keeps the underlying zip archive open; use it as a context
        # manager so the file handle is released deterministically.
        with numpy.load(os.path.join(self.path,
                                     dataset_name + ".npz")) as npzfile:
            X_train = npzfile["X_train"]
            X_test = npzfile["X_test"]
            y_train = npzfile["y_train"]
            y_test = npzfile["y_test"]
        return X_train, y_train, X_test, y_test
| {
"content_hash": "d677849b70a63395c408995aa0f203fc",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 32.67816091954023,
"alnum_prop": 0.5378121702427013,
"repo_name": "rtavenar/tslearn",
"id": "5a60d1415994e60667ed89b0131810579d59be1d",
"size": "2843",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tslearn/datasets/cached.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6703"
},
{
"name": "Makefile",
"bytes": "6789"
},
{
"name": "Python",
"bytes": "444385"
}
],
"symlink_target": ""
} |
"""Integer programming examples that show how to use the APIs."""
from ortools.linear_solver import pywraplp
from ortools.init import pywrapinit
def Announce(solver, api_type):
    """Print a banner naming the solver backend and API flavor about to run."""
    print('---- Integer programming example with {} ({}) -----'.format(
        solver, api_type))
def RunIntegerExampleNaturalLanguageAPI(optimization_problem_type):
    """Example of simple integer program with natural language API."""
    solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
    if not solver:
        # Backend not compiled in; silently skip it.
        return
    Announce(optimization_problem_type, 'natural language API')
    bound = solver.infinity()
    # Two non-negative integer decision variables.
    var_x1 = solver.IntVar(0.0, bound, 'x1')
    var_x2 = solver.IntVar(0.0, bound, 'x2')
    # Objective and single constraint, stated as expressions.
    solver.Minimize(var_x1 + 2 * var_x2)
    solver.Add(3 * var_x1 + 2 * var_x2 >= 17)
    SolveAndPrint(solver, [var_x1, var_x2])
def RunIntegerExampleCppStyleAPI(optimization_problem_type):
    """Example of simple integer program with the C++ style API."""
    solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
    if not solver:
        # Backend not compiled in; silently skip it.
        return
    Announce(optimization_problem_type, 'C++ style API')
    bound = solver.infinity()
    # Two non-negative integer decision variables.
    var_x1 = solver.IntVar(0.0, bound, 'x1')
    var_x2 = solver.IntVar(0.0, bound, 'x2')
    # Minimize x1 + 2 * x2, coefficient by coefficient.
    objective = solver.Objective()
    objective.SetCoefficient(var_x1, 1)
    objective.SetCoefficient(var_x2, 2)
    # Constraint: 3 * x1 + 2 * x2 >= 17.
    constraint = solver.Constraint(17, bound)
    constraint.SetCoefficient(var_x1, 3)
    constraint.SetCoefficient(var_x2, 2)
    SolveAndPrint(solver, [var_x1, var_x2])
def SolveAndPrint(solver, variable_list):
    """Solve the model held by `solver` and print statistics and solution."""
    print('Number of variables = %d' % solver.NumVariables())
    print('Number of constraints = %d' % solver.NumConstraints())
    result_status = solver.Solve()
    # The problem has an optimal solution.
    assert result_status == pywraplp.Solver.OPTIMAL
    # The solution looks legit (when using solvers others than
    # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
    assert solver.VerifySolution(1e-7, True)
    print('Problem solved in %f milliseconds' % solver.wall_time())
    # The objective value of the solution.
    print('Optimal objective value = %f' % solver.Objective().Value())
    # The value of each variable in the solution.
    for var in variable_list:
        print('%s = %f' % (var.name(), var.solution_value()))
    print('Advanced usage:')
    print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
def RunAllIntegerExampleNaturalLanguageAPI():
    """Run the natural-language-API example on every supported backend."""
    for backend in ('GLPK', 'CBC', 'SCIP', 'SAT', 'Gurobi'):
        RunIntegerExampleNaturalLanguageAPI(backend)
def RunAllIntegerExampleCppStyleAPI():
    """Run the C++-style-API example on every supported backend."""
    for backend in ('GLPK', 'CBC', 'SCIP', 'SAT', 'Gurobi'):
        RunIntegerExampleCppStyleAPI(backend)
def main():
    """Entry point: exercise both API flavours on all solvers."""
    RunAllIntegerExampleNaturalLanguageAPI()
    RunAllIntegerExampleCppStyleAPI()
if __name__ == '__main__':
    # Route OR-Tools' C++ logging through this script's name.
    pywrapinit.CppBridge.InitLogging('integer_programming.py')
    cpp_flags = pywrapinit.CppFlags()
    cpp_flags.logtostderr = True  # log to stderr instead of files
    cpp_flags.log_prefix = False  # drop the per-line log prefix
    pywrapinit.CppBridge.SetFlags(cpp_flags)
    main()
| {
"content_hash": "6a69cf7ff7a578b33235feb4df3f31e5",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 31.383928571428573,
"alnum_prop": 0.6981507823613087,
"repo_name": "google/or-tools",
"id": "c127031a2e750d2e4a6cf189efc6e7967803d418",
"size": "4115",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "examples/python/integer_programming.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to generate gperf tables of kernel object metadata
User mode threads making system calls reference kernel objects by memory
address, as the kernel/driver APIs in Zephyr are the same for both user
and supervisor contexts. It is necessary for the kernel to be able to
validate accesses to kernel objects to make the following assertions:
- That the memory address points to a kernel object
- The kernel object is of the expected type for the API being invoked
- The kernel object is of the expected initialization state
- The calling thread has sufficient permissions on the object
For more details see the "Kernel Objects" section in the documentation.
The zephyr build generates an intermediate ELF binary, zephyr_prebuilt.elf,
which this script scans looking for kernel objects by examining the DWARF
debug information to look for instances of data structures that are considered
kernel objects. For device drivers, the API struct pointer populated at build
time is also examined to disambiguate between various device driver instances
since they are all 'struct device'.
This script can generate five different output files:
- A gperf script to generate the hash table mapping kernel object memory
addresses to kernel object metadata, used to track permissions,
object type, initialization state, and any object-specific data.
- A header file containing generated macros for validating driver instances
inside the system call handlers for the driver subsystem APIs.
- A code fragment included by kernel.h with one enum constant for
each kernel object type and each driver instance.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping the kernel object types and driver
instances to their human-readable representation in the
otype_to_str() function.
- The inner cases of a switch/case C statement, included by
kernel/userspace.c, mapping kernel object types to their sizes.
This is used for allocating instances of them at runtime
(CONFIG_DYNAMIC_OBJECTS) in the obj_size_get() function.
"""
import sys
import argparse
import math
import os
import struct
from elf_helper import ElfHelper, kobject_to_enum
from collections import OrderedDict
# Keys in this dictionary are structs which should be recognized as kernel
# objects. Values are a tuple:
#
# - The first item is None, or the name of a Kconfig that
# indicates the presence of this object's definition in case it is not
# available in all configurations.
#
# - The second item is a boolean indicating whether it is permissible for
# the object to be located in user-accessible memory.
# Regular dictionaries are ordered only with Python 3.6 and
# above. Good summary and pointers to official documents at:
# https://stackoverflow.com/questions/39980323/are-dictionaries-ordered-in-python-3-6
kobjects = OrderedDict ([
("k_mem_slab", (None, False)),
("k_msgq", (None, False)),
("k_mutex", (None, False)),
("k_pipe", (None, False)),
("k_queue", (None, False)),
("k_poll_signal", (None, False)),
("k_sem", (None, False)),
("k_stack", (None, False)),
("k_thread", (None, False)),
("k_timer", (None, False)),
("_k_thread_stack_element", (None, False)),
("device", (None, False)),
("sys_mutex", (None, True))
])
subsystems = [
"adc_driver_api",
"aio_cmp_driver_api",
"counter_driver_api",
"crypto_driver_api",
"dma_driver_api",
"flash_driver_api",
"gpio_driver_api",
"i2c_driver_api",
"i2s_driver_api",
"ipm_driver_api",
"led_driver_api",
"pinmux_driver_api",
"pwm_driver_api",
"entropy_driver_api",
"rtc_driver_api",
"sensor_driver_api",
"spi_driver_api",
"uart_driver_api",
"can_driver_api",
]
header = """%compare-lengths
%define lookup-function-name z_object_lookup
%language=ANSI-C
%global-table
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}
struct _k_object;
"""
# Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct _k_object *z_object_gperf_find(void *obj)
{
return z_object_lookup((const char *)obj, sizeof(void *));
}
void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
int i;
for (i = MIN_HASH_VALUE; i <= MAX_HASH_VALUE; i++) {
if (wordlist[i].name != NULL) {
func(&wordlist[i], context);
}
}
}
#ifndef CONFIG_DYNAMIC_OBJECTS
struct _k_object *z_object_find(void *obj)
ALIAS_OF(z_object_gperf_find);
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
ALIAS_OF(z_object_gperf_wordlist_foreach);
#endif
"""
def write_gperf_table(fp, eh, objs, static_begin, static_end):
    """Emit the gperf input mapping kernel object addresses to metadata.

    fp is the output stream, eh the ElfHelper for the scanned binary, objs
    maps object address -> kernel object info, and [static_begin,
    static_end) is the address range of statically initialized objects.
    """
    fp.write(header)
    # Kernel-side backing storage for user-mode sys_mutex objects.
    num_mutexes = eh.get_sys_mutex_counter()
    if num_mutexes != 0:
        fp.write("static struct k_mutex kernel_mutexes[%d] = {\n"
                 % num_mutexes)
        initializers = ["_K_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i
                        for i in range(num_mutexes)]
        fp.write(", ".join(initializers))
        fp.write("};\n")
    fp.write("%%\n")
    # Bitmap of thread indexes: a cleared bit marks an index already taken
    # by a build-time thread object.
    syms = eh.get_symbols()
    thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
    thread_idx_map = {i: 0xFF for i in range(thread_max_bytes)}
    for obj_addr, ko in objs.items():
        obj_type = ko.type_name
        # pre-initialized objects fall within this memory range, they are
        # either completely initialized at build time, or done automatically
        # at boot during some PRE_KERNEL_* phase
        initialized = static_begin <= obj_addr < static_end
        packed = struct.pack("<I" if eh.little_endian else ">I", obj_addr)
        # gperf key: the object's address as a raw binary string.
        fp.write("\"")
        fp.write("".join("\\x%02x" % byte for byte in packed))
        fp.write(
            "\",{},%s,%s,%s\n" %
            (obj_type,
             "K_OBJ_FLAG_INITIALIZED" if initialized else "0",
             str(ko.data)))
        if obj_type == "K_OBJ_THREAD":
            # Claim this thread's index in the bitmap.
            idx = math.floor(ko.data / 8)
            bit = ko.data % 8
            thread_idx_map[idx] = thread_idx_map[idx] & ~(2 ** bit)
    fp.write(footer)
    # Generate the array of already mapped thread indexes
    fp.write('\n')
    fp.write('u8_t _thread_idx_map[%d] = {' % (thread_max_bytes))
    fp.write("".join(' 0x%x, ' % thread_idx_map[i]
                     for i in range(thread_max_bytes)))
    fp.write('};\n')
# Template of the per-subsystem driver validation macro; expanded once per
# entry in `subsystems` by write_validation_output().
driver_macro_tpl = """
#define Z_SYSCALL_DRIVER_%(driver_upper)s(ptr, op) Z_SYSCALL_DRIVER_GEN(ptr, op, %(driver_lower)s, %(driver_upper)s)
"""
def write_validation_output(fp):
    """Write the driver-validation macro header used by syscall handlers."""
    fp.write("#ifndef DRIVER_VALIDATION_GEN_H\n")
    fp.write("#define DRIVER_VALIDATION_GEN_H\n")
    # Generic helper the per-driver macros below delegate to.
    fp.write("""#define Z_SYSCALL_DRIVER_GEN(ptr, op, driver_lower_case, driver_upper_case) \\
		(Z_SYSCALL_OBJ(ptr, K_OBJ_DRIVER_##driver_upper_case) || \\
		 Z_SYSCALL_DRIVER_OP(ptr, driver_lower_case##_driver_api, op))
""")
    for subsystem in subsystems:
        driver = subsystem.replace("_driver_api", "")
        fp.write(driver_macro_tpl % {
            "driver_lower": driver.lower(),
            "driver_upper": driver.upper(),
        })
    fp.write("#endif /* DRIVER_VALIDATION_GEN_H */\n")
def write_kobj_types_output(fp):
    """Write the enum constants included by kernel.h for each object type."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # Devices get per-driver enum values instead (below).
        if kobj == "device":
            continue
        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write("%s,\n" % kobject_to_enum(kobj))
        if dep:
            fp.write("#endif\n")
    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        driver = subsystem.replace("_driver_api", "").upper()
        fp.write("K_OBJ_DRIVER_%s,\n" % driver)
def write_kobj_otype_output(fp):
    """Write the switch cases mapping object types to printable names."""
    fp.write("/* Core kernel objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # Devices get per-driver cases instead (below).
        if kobj == "device":
            continue
        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write('case %s: ret = "%s"; break;\n' %
                 (kobject_to_enum(kobj), kobj))
        if dep:
            fp.write("#endif\n")
    fp.write("/* Driver subsystems */\n")
    for subsystem in subsystems:
        driver = subsystem.replace("_driver_api", "")
        fp.write('case K_OBJ_DRIVER_%s: ret = "%s driver"; break;\n' % (
            driver.upper(),
            driver
        ))
def write_kobj_size_output(fp):
    """Write the switch cases mapping object types to their struct sizes."""
    fp.write("/* Non device/stack objects */\n")
    for kobj, (dep, _) in kobjects.items():
        # device handled by default case. Stacks are not currently handled,
        # if they eventually are it will be a special case.
        if kobj in ("device", "_k_thread_stack_element"):
            continue
        if dep:
            fp.write("#ifdef %s\n" % dep)
        fp.write('case %s: ret = sizeof(struct %s); break;\n' %
                 (kobject_to_enum(kobj), kobj))
        if dep:
            fp.write("#endif\n")
def parse_args():
    """Populate the module-level `args` namespace from the command line."""
    global args
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-k", "--kernel", required=False,
                        help="Input zephyr ELF binary")
    parser.add_argument("-g", "--gperf-output", required=False,
                        help="Output list of kernel object addresses for gperf use")
    parser.add_argument("-V", "--validation-output", required=False,
                        help="Output driver validation macros")
    parser.add_argument("-K", "--kobj-types-output", required=False,
                        help="Output k_object enum constants")
    parser.add_argument("-S", "--kobj-otype-output", required=False,
                        help="Output case statements for otype_to_str()")
    parser.add_argument("-Z", "--kobj-size-output", required=False,
                        help="Output case statements for obj_size_get()")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Print extra debugging information")
    args = parser.parse_args()
    # Environment variable acts as a global override for -v.
    if "VERBOSE" in os.environ:
        args.verbose = 1
def main():
    """Parse arguments and emit whichever output files were requested."""
    parse_args()
    if args.gperf_output:
        # Scanning the ELF is only needed for the gperf table output.
        assert args.kernel, "--kernel ELF required for --gperf-output"
        eh = ElfHelper(args.kernel, args.verbose, kobjects, subsystems)
        syms = eh.get_symbols()
        max_threads = syms["CONFIG_MAX_THREAD_BYTES"] * 8
        objs = eh.find_kobjects(syms)
        if not objs:
            sys.stderr.write("WARNING: zero kobject found in %s\n"
                             % args.kernel)
        thread_counter = eh.get_thread_counter()
        if thread_counter > max_threads:
            sys.stderr.write("Too many thread objects (%d)\n" % thread_counter)
            sys.stderr.write("Increase CONFIG_MAX_THREAD_BYTES to %d\n" %
                             -(-thread_counter // 8))  # ceiling division
            sys.exit(1)
        with open(args.gperf_output, "w") as fp:
            write_gperf_table(fp, eh, objs,
                              syms["_static_kernel_objects_begin"],
                              syms["_static_kernel_objects_end"])
    if args.validation_output:
        with open(args.validation_output, "w") as fp:
            write_validation_output(fp)
    if args.kobj_types_output:
        with open(args.kobj_types_output, "w") as fp:
            write_kobj_types_output(fp)
    if args.kobj_otype_output:
        with open(args.kobj_otype_output, "w") as fp:
            write_kobj_otype_output(fp)
    if args.kobj_size_output:
        with open(args.kobj_size_output, "w") as fp:
            write_kobj_size_output(fp)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "2bddf5e98c54c8bdcd73fdaea514ed1f",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 116,
"avg_line_length": 32.26315789473684,
"alnum_prop": 0.6212071778140293,
"repo_name": "ldts/zephyr",
"id": "c5559407df47e38c691eb7b5a57cd3e85dd011da",
"size": "12260",
"binary": false,
"copies": "2",
"ref": "refs/heads/evl-latency",
"path": "scripts/gen_kobject_list.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1294824"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "400401613"
},
{
"name": "C++",
"bytes": "4977748"
},
{
"name": "CMake",
"bytes": "609692"
},
{
"name": "EmberScript",
"bytes": "792"
},
{
"name": "Makefile",
"bytes": "3313"
},
{
"name": "Objective-C",
"bytes": "60830"
},
{
"name": "Perl",
"bytes": "198181"
},
{
"name": "Python",
"bytes": "946417"
},
{
"name": "Shell",
"bytes": "45601"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Release version of the package; bump when publishing.
version = "0.10.0"
setup(name="pypgqueue",
      version=version,
      description="A job queue based on PostgreSQL's listen/notify features",
      author="Trey Cucco",
      author_email="fcucco@gmail.com",
      url="https://github.com/treycucco/pypgq",
      download_url="https://github.com/treycucco/pypgq/tarball/master",
      packages=["pypgq"],
      # Ship the schema-creation SQL alongside the code.
      package_data={"pypgq": ["ddl.sql"]},
      install_requires=[
          "bidon >= 1.0.4",
          "psycopg2"
      ],
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: BSD License",
          "Natural Language :: English",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.4",
          "Programming Language :: Python :: 3 :: Only"
      ],
      license="BSD",
      platforms="any")
| {
"content_hash": "4a35af433e0599bb55881011aba94880",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 33.63333333333333,
"alnum_prop": 0.5887016848364718,
"repo_name": "treycucco/pypgqueue",
"id": "429c9a1466aaaf31fbc05129ae893ac34ad2d489",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15980"
}
],
"symlink_target": ""
} |
class TweetCriteria:
    """Builder holding search criteria for tweet queries.

    Every setter returns ``self`` so calls can be chained.

    NOTE(review): most attributes are created lazily by their setter and
    are deliberately NOT initialized here — downstream code presumably
    probes for them with ``hasattr`` before use; confirm against callers
    before pre-initializing them.
    """
    def __init__(self):
        # Defaults: no limit on result count; 15-mile radius around `near`.
        self.maxTweets = 0
        self.within = "15mi"
    def setUsername(self, username):
        """Restrict results to tweets by this username; returns self."""
        self.username = username
        return self
    def setSince(self, since):
        """Lower bound date (inclusive); returns self."""
        self.since = since
        return self
    def setUntil(self, until):
        """Upper bound date; returns self."""
        self.until = until
        return self
    def setQuerySearch(self, querySearch):
        """Free-text search query; returns self."""
        self.querySearch = querySearch
        return self
    def setTweetType(self, tweetType):
        """Kind of tweets to fetch; returns self."""
        self.tweetType = tweetType
        return self
    def setMaxTweets(self, maxTweets):
        """Maximum number of tweets to collect; returns self."""
        self.maxTweets = maxTweets
        return self
    def setSinceTimeStamp(self, sinceTimeStamp):
        """Lower bound timestamp; returns self."""
        self.sinceTimeStamp = sinceTimeStamp
        return self
    def setUntilTimeStamp(self, untilTimeStamp):
        """Upper bound timestamp; returns self."""
        self.untilTimeStamp = untilTimeStamp
        return self
    def setTopTweets(self, topTweets):
        """Whether to request only top tweets; returns self."""
        self.topTweets = topTweets
        return self
    def setNear(self, near):
        """Center location for geographic filtering; returns self."""
        self.near = near
        return self
    def setWithin(self, within):
        """Radius around `near` (e.g. "15mi"); returns self."""
        self.within = within
        return self
| {
"content_hash": "a6e03fcdb77299cb0df4b6c670b1f599",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 45,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.7215320910973085,
"repo_name": "phiedulxp/tweet_search",
"id": "335370f75bd231fefcf1b1cc703e4ebda1a481f3",
"size": "966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spider/got/manager/TweetCriteria.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "787958"
},
{
"name": "Python",
"bytes": "107622"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
from math import sqrt
import numpy as np
from scipy.linalg import eig, solve, norm, inv
class StochasticLinearDiff(object):
    """
    Computations for a model in the form of the canonical stochastic
    linear difference equation:

    .. math::

        x_{t+1} = A x_t + C w_{t+1}
    """
    def __init__(self, A, C):
        self.A = A
        self.C = C
        # Cache the eigen-decomposition of A; dynamics are flagged as
        # unbounded when some eigenvalue exceeds 1 in modulus.
        evals, evecs = eig(self.A, left=False, right=True)
        self.evals, self.evecs = evals, evecs
        self.unbounded = np.abs(evals).max() > 1

    def Cx(self, j=0):
        "Covariance stationary covariance matrix"
        if self.unbounded:
            msg = 'This computation will not work because the eigenvalues'
            msg += '\nof A are not all below 1 in modulus.'
            raise ValueError(msg)
        c_x = doublej(self.A, self.C.dot(self.C.T))
        if j == 0:
            return c_x
        # C_x(|j|), transposed when j is negative.
        c_xj = np.linalg.matrix_power(self.A, abs(j)).dot(c_x)
        return c_xj.T if j < 0 else c_xj

    @property
    def mu(self):
        "Covariance stationary mean"
        if self.unbounded:
            msg = 'This computation will not work because the eigenvalues {0}'
            msg += '\nof A are not all below 1 in modulus.'
            raise ValueError(msg.format(self.evals))
        # The stationary mean is the (normalized) eigenvector associated
        # with a unit eigenvalue of A.
        try:
            unit_ind = np.where(self.evals == 1)[0][0]
        except IndexError:
            raise ValueError("The A matrix doesn't have any unit eigenvalues")
        return self.evecs[:, unit_ind] / self.evecs[-1, unit_ind]
class Markov(object):
    """
    Basic computations on a finite-state Markov transition matrix.
    """
    def __init__(self, P, verbose=False):
        self.P = P
        self.verbose = verbose

    def __repr__(self):
        msg = "Markov process with transition matrix P = \n{0}"
        return msg.format(self.P)

    def stationary_distributions(self):
        """Left unit-eigenvectors of P, normalized to probability vectors.

        Also caches the full eigen-decomposition on the instance and sets
        an sd<i> attribute per distribution found.
        """
        evals, l_evecs, r_evecs = eig(self.P, left=True, right=True)
        self.evals, self.l_evecs, self.r_evecs = evals, l_evecs, r_evecs
        stationary = []
        for i, ind in enumerate(np.where(evals == 1)[0]):
            sd_name = 'sd{0}'.format(i + 1)
            sd_vec = l_evecs[:, ind]
            # Flip sign if needed, then scale so entries sum to one.
            if all(sd_vec <= 0):
                sd_vec = sd_vec * (-1)
            sd_vec /= sd_vec.sum()
            self.__setattr__(sd_name, sd_vec)
            stationary.append(sd_vec)
            if self.verbose:
                print('Set instance variable %s for stationary distribution'
                      % sd_name)
        return stationary

    def invariant_distributions(self):
        """Right unit-eigenvectors of P (requires stationary_distributions
        to have been called first, which caches the decomposition)."""
        invariant = []
        for i, ind in enumerate(np.where(self.evals == 1)[0]):
            id_name = 'id{0}'.format(i + 1)
            id_vec = self.r_evecs[:, ind]
            self.__setattr__(id_name, id_vec)
            invariant.append(id_vec)
            if self.verbose:
                print('Set instance variable %s for invariant distribution'
                      % id_name)
        return invariant
class SymMarkov(object):
    """
    Do basic things with Markov matrices. The matrix P that is passed
    to the constructor for this class is assumed to be a sympy matrix.
    If it isn't, then it is cast as such.
    """
    def __init__(self, P, verbose=False):
        import sympy as sym
        self.P = P if isinstance(P, sym.Matrix) else sym.Matrix(P)
        self.verbose = verbose

    def stationary_distributions(self, subs, normalize=True):
        """
        Find the stationary distributions associated with the Markov
        process, by substituting parameters into the transition matrix

        Parameters
        ==========
        subs : dict
            A dictionary of substitutions to be passed to self.P before
            doing the computation

        normalize : bool, optional(default=True)
            Whether or not the stationary distributions should be
            normalized so they sum to 1 before returning.

        Returns
        =======
        pi0s : list
            A list of stationary distributions.
        """
        # Make the substitutions
        PN = self.P.subs(subs)
        # Transpose gives left eigenvectors
        l_vecs = PN.T.eigenvects()
        # keep only unit eigenvalues around, grab the vectors.
        # BUG FIX: under Python 3, filter() returns a lazy iterator, so the
        # original len(units)/units[0] usage raised TypeError; build a list.
        units = [v for v in l_vecs if v[0] == 1]
        pi0s = units[0][2] if len(units) != 0 else []
        # Normalize so they sum to 1
        if normalize:
            pi0s = [i / sum(i) for i in pi0s]
        return pi0s
def doublej(a1, b1, max_it=50):
    """
    Computes the infinite sum V given by

    .. math::

        V = \\sum_{j=0}^{\\infty} a1^j b1 a1^j'

    where a1 and b1 are each (n x n) matrices with eigenvalues whose
    moduli are bounded by unity. V is found by iterating the doubling
    recursions a1_j = a1_{j-1} a1_{j-1},
    V_j = V_{j-1} + a1_{j-1} V_{j-1} a1_{j-1}' starting from V_0 = b1,
    until successive V iterates agree to machine precision.

    Raises ValueError after max_it doublings without convergence.
    """
    alpha = a1
    gamma = b1
    diff = 5
    n_its = 1
    while diff > 1e-15:
        next_alpha = alpha.dot(alpha)
        next_gamma = gamma + np.dot(alpha.dot(gamma), alpha.T)
        diff = np.max(np.abs(next_gamma - gamma))
        alpha = next_alpha
        gamma = next_gamma
        n_its += 1
        if n_its > max_it:
            raise ValueError('Exceeded maximum iterations of %i.' % (max_it) +
                             ' Check your input matrices')
    return gamma
def doubleo(A, C, Q, R, tol=1e-15):
    """
    Solve the Riccati matrix difference equations associated with the
    Kalman filter by the doubling algorithm, for the system

    .. math::

        x_{t+1} = A x_t + e_{t+1}
        y_t = C x_t + v_t

    with :math:`E e_{t+1} e_{t+1}' = Q`, :math:`E v_t v_t' = R` and
    :math:`v_s' e_t = 0` for all s, t.

    Parameters
    ----------
    A : array_like, dtype=float, shape=(n, n)
        The matrix A in the law of motion for x
    C : array_like, dtype=float, shape=(k, n)
    Q : array_like, dtype=float, shape=(n, n)
    R : array_like, dtype=float, shape=(k, k)
    tol : float, optional(default=1e-15)
        Convergence tolerance on successive Kalman gain iterates.

    Returns
    -------
    K : array_like, dtype=float
        The Kalman gain.
    S : array_like, dtype=float
        The stationary covariance matrix of the one-step-ahead errors in
        forecasting the state.

    Notes
    -----
    By using duality, control problems can also be solved.
    """
    a0 = A.T
    b0 = C.T.dot(solve(R, C))
    g0 = Q
    ident = np.eye(max(A.shape))
    dd = 1
    while dd > tol:
        # All three updates share the same linear system matrix.
        common = ident + np.dot(b0, g0)
        a1 = a0.dot(solve(common, a0))
        b1 = b0 + a0.dot(solve(common, b0.dot(a0.T)))
        g1 = g0 + np.dot(a0.T.dot(g0), solve(common, a0))
        # Gains implied by the previous and current covariance iterates.
        k1 = np.dot(A.dot(g1), solve(np.dot(C, g1.T).dot(C.T) + R.T, C).T)
        k0 = np.dot(A.dot(g0), solve(np.dot(C, g0.T).dot(C.T) + R.T, C).T)
        dd = np.max(k1 - k0)
        a0 = a1
        b0 = b1
        g0 = g1
    return k1, g1
def markov(T, n=100, s0=0, V=None):
    """
    Generates a simulation of the Markov chain described by a transition
    matrix.

    Parameters
    ==========
    T : array_like, dtype=float, ndim=2
        The Markov transition matrix that describes the model

    n : integer, optional(default=100)
        How many steps to simulate the chain; the returned series have
        n - 1 entries (initial state plus n - 2 transitions).

    s0 : int, optional(default=0)
        The initial state. Should be a value between 0 and T.shape[0]
        - 1 because it will be used as a python index.

    V : array_like, dtype=float, optional(default=range(T.shape[0]))
        The 1d array to specify numerical value associated with each
        state

    Returns
    =======
    chain : array_like, dtype=float
        The simulated state values V[s_t]

    state : array_like, dtype=int
        One-hot indicator matrix of states, shape (r, n - 1)

    Raises
    ======
    ValueError
        If T is not square, a row of T does not sum to 1, V has the
        wrong length, or s0 is out of range.
    """
    T = np.asarray(T)
    r, c = T.shape
    if V is None:
        V = np.arange(r)
    else:
        V = np.asarray(V)
    if r != c:
        raise ValueError('T must be a square matrix')
    _row_sums = T.sum(axis=1)
    # Use a tolerance here: valid rows such as [1/3, 1/3, 1/3] need not
    # sum to exactly 1.0 in floating point.
    if not np.allclose(_row_sums, 1):
        bad = np.where(~np.isclose(_row_sums, 1))
        msg = 'All rows of T must sum to 1. Row(s) %s do not'
        raise ValueError(msg % (bad[0]))
    if V.ndim != 1:
        V = V.flatten()
    if V.size != r:
        msg = 'V must be 1-dimensional array of length %i' % (r)
        raise ValueError(msg)
    if s0 < 0 or s0 > (r - 1):
        msg = 'Value of s0 (%i) must be between 0 and T.shape[0] (%i)'
        raise ValueError(msg % (s0, r - 1))
    X = np.random.rand(n - 1)     # uniform draws driving the transitions
    s = np.zeros(r)
    s[s0] = 1                     # one-hot encoding of the current state
    cdf = np.cumsum(T, axis=1)    # row-wise CDFs of T
    state = np.empty((r, n - 1))
    for k in range(n - 1):
        state[:, k] = s
        # The CDF interval of the current row containing X[k] selects the
        # next state.
        ppi = np.concatenate([[0.], s.dot(cdf)])
        s = (X[k] <= ppi[1:]) * (X[k] > ppi[:-1])
    chain = V.dot(state)
    return chain, state
def olrp(beta, A, B, Q, R, W=None, tol=1e-6, max_iter=1000):
    """
    Calculates F of the feedback law u = -F x that optimizes

    .. math::

        \\sum \\{beta^t [x'Qx + u'Ru + 2x'Wu] \\}

    subject to :math:`x_{t+1} = A x_t + B u_t`, where x is the nx1
    vector of states and u the kx1 vector of controls.

    Parameters
    ----------
    beta : float
        The discount factor; 1 for no discounting.
    A : array_like, dtype=float, shape=(n, n)
    B : array_like, dtype=float, shape=(n, k)
    Q : array_like, dtype=float, shape=(n, n)
        State cost matrix.
    R : array_like, dtype=float, shape=(k, k)
        Control cost matrix.
    W : array_like, dtype=float, shape=(n, k), optional(default=0)
        Cross-product cost matrix.
    tol : float, optional(default=1e-6)
        Convergence tolerance on successive F iterates (used only on the
        iterative branch taken when R is numerically singular).
    max_iter : int, optional(default=1000)
        Iteration limit for the iterative branch.

    Returns
    -------
    F : array_like, dtype=float
        The feedback law from the equation above.
    P : array_like, dtype=float
        The steady-state solution to the associated discrete matrix
        Riccati equation.

    Raises
    ------
    ValueError
        If the iterative branch does not converge within max_iter steps.
    """
    m = max(A.shape)
    rc, cb = np.atleast_2d(B).shape
    if W is None:
        W = np.zeros((m, cb))
    if np.max(np.abs(eig(R)[0])) > 1e-5:
        # R is numerically nonsingular: absorb the cross term and discount
        # into (A, B, Q), then solve by the doubling algorithm.
        A = sqrt(beta) * (A - B.dot(solve(R, W.T)))
        B = sqrt(beta) * B
        Q = Q - W.dot(solve(R, W.T))
        k, s = doubleo(A.T, B.T, Q, R)
        f = k.T + solve(R, W.T)
        p = s
    else:
        # Singular R: iterate the Riccati difference equation directly.
        p0 = -0.1 * np.eye(m)
        for it in range(max_iter):
            f0 = solve(R + beta * B.T.dot(p0).dot(B),
                       beta * B.T.dot(p0).dot(A) + W.T)
            p1 = beta * A.T.dot(p0).dot(A) + Q - \
                (beta * A.T.dot(p0).dot(B) + W).dot(f0)
            f1 = solve(R + beta * B.T.dot(p1).dot(B),
                       beta * B.T.dot(p1).dot(A) + W.T)
            # BUG FIX: the original tested `if dd > tol: break`, stopping on
            # the first NON-converged step and raising "No convergence"
            # precisely when the iteration had converged. Stop when
            # successive F iterates agree to within tol (in absolute value).
            dd = np.max(np.abs(f1 - f0))
            p0 = p1
            if dd < tol:
                break
        else:
            msg = 'No convergence: Iteration limit of {0} reached in OLRP'
            raise ValueError(msg.format(max_iter))
        f = f1
        p = p1
    return f, p
def ricatti(beta, A, B, R, Q, H, tol=1e-6, maxiter=1000):
    """
    Calculates F of the feedback law u = -F x that optimizes

    .. math::

        \\sum \\{beta^t [x'Rx + u'Qu + 2x'H'u] \\}

    subject to :math:`x_{t+1} = A x_t + B u_t`, where x is the nx1
    vector of states and u the kx1 vector of controls.

    NOTE: relative to olrp(), the roles of R and Q are swapped here —
    R is the (n, n) state cost and Q the (k, k) control cost, matching
    the reshapes below.

    Parameters
    ----------
    beta : float
        The discount factor; 1 for no discounting.
    A : array_like, dtype=float, shape=(n, n)
    B : array_like, dtype=float, shape=(n, k)
    R : array_like, dtype=float, shape=(n, n)
        State cost matrix.
    Q : array_like, dtype=float, shape=(k, k)
        Control cost matrix.
    H : array_like, dtype=float, shape=(k, n)
        Cross-product cost matrix.
    tol : float, optional(default=1e-6)
        Convergence tolerance on the norm of successive P iterates.
    maxiter : int, optional(default=1000)
        Maximum number of Riccati iterations.

    Returns
    -------
    F : array_like, dtype=float
        The feedback law from the equation above.
    P : array_like, dtype=float
        The steady-state solution to the associated discrete matrix
        Riccati equation.
    """
    n = A.shape[0]
    k = np.ascontiguousarray(Q).shape[0]
    A, B, R, Q, H = map(np.matrix, [A, B, R, Q, H])
    A = A.reshape(n, n)
    B = B.reshape(n, k)
    Q = Q.reshape(k, k)
    R = R.reshape(n, n)
    H = H.reshape(k, n)
    # Start with an initial P matrix
    p0 = np.zeros((n, n))
    p1 = np.zeros((n, n))
    # Define some variables necessary to enter while loop
    dist = 10.
    iters = 0
    while dist > tol and iters < maxiter:
        p1 = R + beta*A.T*p0*A - ((beta*A.T*p0*B + H.T) *
                                  inv(Q + beta*B.T*p0*B) *
                                  (beta*B.T*p0*A + H))
        dist = norm(p1 - p0)
        print("Iteration is %i and norm is %.3e" % (iters, dist))
        p0 = p1
        # BUG FIX: iters was never incremented, so the maxiter guard could
        # never terminate the loop on non-convergence.
        iters += 1
    P = p0
    F = inv((Q + beta*B.T.dot(P.dot(B)))).dot(beta*B.T.dot(P.dot(A)) + H)
    return map(np.array, [F, P])
if __name__ == '__main__':
    # Smoke test: simulate a two-state chain for 2000 periods.
    P = np.array([[.7, .3], [.2, .8]])
    c, s = markov(P, n=2000, V=np.array([1., 2.]))
| {
"content_hash": "61b1a51262a78e4a4d568445d461e6bf",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 78,
"avg_line_length": 27.44122965641953,
"alnum_prop": 0.54332784184514,
"repo_name": "spencerlyon2/econtools",
"id": "5953e30f5a2925f108a1eb9930fb75860768121e",
"size": "15175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/econtools/macro/rmt_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25537"
}
],
"symlink_target": ""
} |
"""SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
class EmailBackend(BaseEmailBackend):
    """
    A wrapper that manages the SMTP network connection.

    Connection parameters default to the corresponding EMAIL_* settings;
    send_messages() is serialized with an RLock so one backend instance
    may be shared between threads.
    """
    def __init__(self, host=None, port=None, username=None, password=None,
                 use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
                 **kwargs):
        super(EmailBackend, self).__init__(fail_silently=fail_silently)
        self.host = host or settings.EMAIL_HOST
        self.port = port or settings.EMAIL_PORT
        # `is None` (rather than `or`) lets callers pass an explicitly
        # empty username/password to disable SMTP authentication.
        self.username = settings.EMAIL_HOST_USER if username is None else username
        self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
        self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
        self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
        self.timeout = timeout
        if self.use_ssl and self.use_tls:
            raise ValueError(
                "EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
                "one of those settings to True.")
        self.connection = None
        self._lock = threading.RLock()
    def open(self):
        """
        Ensures we have a connection to the email server. Returns whether or
        not a new connection was required (True or False).
        """
        if self.connection:
            # Nothing to do if the connection is already open.
            return False
        connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
        # If local_hostname is not specified, socket.getfqdn() gets used.
        # For performance, we use the cached FQDN for local_hostname.
        connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
        if self.timeout is not None:
            connection_params['timeout'] = self.timeout
        try:
            self.connection = connection_class(self.host, self.port, **connection_params)
            # TLS/SSL are mutually exclusive, so only attempt TLS over
            # non-secure connections.
            if not self.use_ssl and self.use_tls:
                self.connection.ehlo()
                self.connection.starttls()
                self.connection.ehlo()
            if self.username and self.password:
                self.connection.login(self.username, self.password)
        except smtplib.SMTPException:
            if not self.fail_silently:
                raise
    def close(self):
        """Closes the connection to the email server."""
        if self.connection is None:
            return
        try:
            try:
                self.connection.quit()
            except (ssl.SSLError, smtplib.SMTPServerDisconnected):
                # This happens when calling quit() on a TLS connection
                # sometimes, or when the connection was already disconnected
                # by the server.
                self.connection.close()
            except smtplib.SMTPException:
                if self.fail_silently:
                    return
                raise
        finally:
            # Always forget the connection so a later open() reconnects.
            self.connection = None
    def send_messages(self, email_messages):
        """
        Sends one or more EmailMessage objects and returns the number of email
        messages sent.
        """
        if not email_messages:
            return
        with self._lock:
            new_conn_created = self.open()
            if not self.connection:
                # We failed silently on open().
                # Trying to send would be pointless.
                return
            num_sent = 0
            for message in email_messages:
                sent = self._send(message)
                if sent:
                    num_sent += 1
            # Only close connections this call opened; reuse is the
            # caller's responsibility otherwise.
            if new_conn_created:
                self.close()
        return num_sent
    def _send(self, email_message):
        """A helper method that does the actual sending."""
        if not email_message.recipients():
            return False
        from_email = sanitize_address(email_message.from_email, email_message.encoding)
        recipients = [sanitize_address(addr, email_message.encoding)
                      for addr in email_message.recipients()]
        message = email_message.message()
        try:
            self.connection.sendmail(from_email, recipients, message.as_bytes())
        except smtplib.SMTPException:
            if not self.fail_silently:
                raise
            return False
        return True
| {
"content_hash": "2f4c935d4fb23750006a92e20fb771c3",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 89,
"avg_line_length": 39.36974789915966,
"alnum_prop": 0.5910352187833511,
"repo_name": "Beeblio/django",
"id": "e72e372ca919be83ee6faca162b13d9137f6b920",
"size": "4685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/core/mail/backends/smtp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42830"
},
{
"name": "HTML",
"bytes": "173915"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "9172420"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import time
from threading import Lock
class Buffer(object):
    """A thread-safe LIFO buffer that can be filled from an iterable.

    Items live in an internal list guarded by a lock.  While consume()
    is draining a source, __loading__ is True so readers iterating the
    buffer keep polling instead of stopping early.
    """
    def __init__(self):
        self.__list__ = []        # backing store (LIFO via pop())
        self.__lock__ = Lock()    # guards every mutation of __list__
        self.__loading__ = False  # True while consume() is running
    def consume(self, source, adapter=None, chunked=False):
        """Drain *source* into the buffer.

        adapter: optional callable applied to each item first.
        chunked: when True each item is stored whole as one chunk via
            extend(); otherwise items are stored one by one.
        """
        self.__loading__ = True
        try:
            for o in source:
                if adapter is not None:
                    o = adapter(o)
                if chunked is True:
                    self.extend(o)
                else:
                    self.put(o)
        except Exception as ex:
            # BUG FIX: `except Exception, ex` and `ex.message` were
            # Python 2-only; `as` plus str(ex) work on 2.6+ and 3.
            message = "Error loading buffer-> %s" % str(ex)
            raise Exception(message, ex)
        finally:
            self.__loading__ = False
    def put(self, value):
        """Store a single value.

        BUG FIX: consume() called self.put(), which did not exist and
        raised AttributeError; it is now an alias for append().
        """
        return self.append(value)
    def extend(self, values):
        # NOTE: stores *values* as a single chunk element (it does not
        # splice the items in), matching consume(chunked=True) semantics.
        with self.__lock__:
            self.__list__.append(values)
        return self
    def append(self, value):
        with self.__lock__:
            if isinstance(value, list) is True:
                # BUG FIX: was map(self.__list__.append, value), which is
                # lazy (a no-op) on Python 3; an explicit loop is correct
                # on both Python 2 and 3.
                for item in value:
                    self.__list__.append(item)
            else:
                self.__list__.append(value)
        return self
    def set(self, value):
        return self.append(value)
    def get(self):
        """Pop and return the most recent item, or None when empty."""
        with self.__lock__:
            try:
                return self.__list__.pop()
            except IndexError:
                # BUG FIX: the previous bare `except:` also hid unrelated
                # errors; only an empty-list pop should yield None.
                return None
    def pop(self):
        return self.get()
    def __iter__(self):
        return self.next()
    def next(self):
        # Generator: yield items until the buffer is empty *and* no
        # producer is loading; poll briefly while a load is in progress.
        while True:
            o = self.pop()
            if o is None and self.empty is True:
                if self.__loading__ is True:
                    time.sleep(.01)
                    continue
                break
            yield o
    @property
    def list(self):
        return self.__list__
    @classmethod
    def create(cls):
        return cls()
    def __repr__(self):
        return "Buffer#{cnt}".format(cnt=self.__len__())
    def __str__(self):
        return self.__repr__()
    def __len__(self):
        return len(self.__list__)
    @property
    def empty(self):
        return len(self.__list__) == 0
class Pipeline(object):
    """A chain of single-argument functions applied in sequence.

    Each Pipeline wraps one callable; connect() appends another stage at
    the end of the chain, and pump() feeds a value through every stage.
    """
    def __init__(self, fn):
        self.fn = fn          # this stage's transform
        self.endpoint = None  # next stage, or None if this is the tail
    def connect(self, fn):
        """Append *fn* as a new stage at the end of the chain.

        Returns the newly created tail Pipeline.  (BUG FIX: previously
        the new pipeline was returned only when delegating to an existing
        endpoint; connecting directly to the tail returned None.)
        """
        if self.endpoint is not None:
            return self.endpoint.connect(fn)
        pipeline = Pipeline(fn)
        self.endpoint = pipeline
        return pipeline
    def pump(self, o):
        """Run *o* through this stage and every stage after it."""
        v = self.fn(o)
        if self.endpoint is not None:
            return self.endpoint.pump(v)
        return v
    def process(self, inputs, *outputs):
        """Pump every input; return a new list or fill outputs[0].

        When an extra argument is given it is treated as an append()-able
        collector (e.g. a Buffer) that receives each result and is then
        returned.
        """
        if len(outputs) == 0:
            # BUG FIX: was map(self.pump, inputs); map() is lazy on
            # Python 3, so nothing would actually be pumped.
            return [self.pump(a) for a in inputs]
        outputs = outputs[0]
        pump = self.pump
        for a in inputs:
            outputs.append(pump(a))
        return outputs
    def multiplex(self, inputs, worker_count=2):
        """Process *inputs* with dispatched workers plus this thread."""
        from fuze import util
        cnt = len(inputs)
        outputs = Buffer.create()
        # BUG FIX: xrange is Python 2-only; range is equivalent here.
        for _ in range(worker_count):
            util.dispatch(self.process, inputs, outputs)
        self.process(inputs, outputs)
        while inputs.empty is False:
            util.sleep(.01)
        while len(outputs) < cnt:
            util.sleep(.01)
        return outputs.list
    def __repr__(self):
        return "Pipeline"
    def __str__(self):
        return self.__repr__()
class Wrapper(dict):
    """
    A Wrapper object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.  Missing attributes resolve to None
    rather than raising; use plain indexing to get a KeyError.

    >>> o = Wrapper(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> del o.a
    >>> o.a is None
    True
    """
    def override(self, other):
        """Deep-merge *other* into self and return self.

        Nested dict values are merged key by key into the existing dict
        (when present); every other value simply replaces the old one.
        """
        def merge(a, b):
            for key in b.keys():
                o = b[key]
                if isinstance(o, dict) is True:
                    try:
                        target = a[key]
                        for k in o.keys():
                            target[k] = o[k]
                    except KeyError:
                        a[key] = o
                else:
                    a[key] = o
        merge(self, other)
        return self
    def __getattr__(self, key):
        # Missing keys resolve to None instead of raising AttributeError.
        # Plain dict values are lazily promoted to Wrapper (and stored
        # back) so attribute access keeps working on nested structures.
        try:
            o = self[key]
            if isinstance(o, dict) is True:
                if isinstance(o, Wrapper) is False:
                    o = Wrapper.create(o)
                    self[key] = o
            return o
        except KeyError:
            # BUG FIX: `except KeyError, ex` was Python 2-only syntax.
            return None
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as ex:
            # BUG FIX: py2-only `except E, ex` and `ex.message` removed.
            raise AttributeError(
                "Unable to delete the wrapper attribute %s-> %s" % (key, ex))
    def reduce(self, fn=None):
        """Return a plain dict copy of this mapping.

        Lists whose elements expose a reduce() method are reduced in
        place as well.  *fn*, when given, post-processes the dict.
        """
        obj = {}
        for key in self.keys():
            v = self[key]
            if isinstance(v, list) and len(v) > 0 and hasattr(v[0], "reduce"):
                # BUG FIX: xrange is Python 2-only.
                for x in range(len(v)):
                    v[x] = v[x].reduce()
            obj[key] = v
        if fn:
            return fn(obj)
        return obj
    def clone(self):
        """Return a shallow Wrapper copy of this mapping."""
        return Wrapper(self.copy())
    def __repr__(self):
        return '<Wrapper ' + dict.__repr__(self) + '>'
    @staticmethod
    def create(*args, **kwargs):
        """Build a Wrapper from a positional dict or from keyword args."""
        if args and len(args) > 0:
            return Wrapper(args[0])
        return Wrapper(kwargs)
class SafeWrapper(Wrapper):
    """A Wrapper whose attribute lookups never raise: unknown names
    simply resolve to None (no lazy promotion of nested dicts)."""
    def __getattr__(self, key):
        # dict.get already returns None for a missing key, which is
        # exactly the contract of this class.
        return self.get(key)
    def clone(self):
        """Return a shallow SafeWrapper copy of this mapping."""
        return SafeWrapper(self.copy())
| {
"content_hash": "6013e674a700aa1dbc0860fac561b2f7",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 102,
"avg_line_length": 24.83450704225352,
"alnum_prop": 0.4537076421380973,
"repo_name": "MagicWishMonkey/fuze",
"id": "048959d0583164b2612a8540c55079ee21c33e82",
"size": "7053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuze/utilities/structs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "312954"
},
{
"name": "Shell",
"bytes": "107"
}
],
"symlink_target": ""
} |
import argparse
import cgi
import colorsys
import difflib
import html
import os
import random
import re
import subprocess
import sys
import tempfile
import textwrap
import webbrowser
class TokenContext(object):
  """Metadata about a token.

  Attributes:
    row: Row index of the token in the data file.
    column: Column index of the token in the data file.
    token: The token string.
    commit: A Commit object that corresponds to the commit that added
      this token.
  """

  def __init__(self, row, column, token, commit=None):
    # The position and text are fixed at construction; the owning commit
    # is attached later, while walking history.
    self.row, self.column, self.token = row, column, token
    self.commit = commit
class Commit(object):
  """Commit data.

  Attributes:
    hash: The commit hash.
    author_name: The author's name.
    author_email: The author's email.
    author_date: The date and time the author created this commit.
    message: The commit message.
    diff: The commit diff.
  """

  def __init__(self, hash, author_name, author_email, author_date, message,
               diff):
    (self.hash, self.author_name, self.author_email, self.author_date,
     self.message, self.diff) = (hash, author_name, author_email,
                                 author_date, message, diff)
def tokenize_data(data, tokenize_by_char, tokenize_whitespace):
  """Tokenizes |data|.

  Args:
    data: String to tokenize.
    tokenize_by_char: If true, individual characters are treated as tokens.
      Otherwise, tokens are either symbols or strings of both alphanumeric
      characters and underscores.
    tokenize_whitespace: Treat non-newline whitespace characters as tokens.

  Returns:
    A list of lists of TokenContexts.  Each inner list holds the tokens of
    one line; newline characters themselves are never emitted as tokens.
  """
  contexts = []
  in_identifier = False
  identifier_start = 0
  identifier = ''
  row = 0
  column = 0
  line_contexts = []
  for c in data:
    if not tokenize_by_char and (c.isalnum() or c == '_'):
      # Accumulate [A-Za-z0-9_]+ runs into a single identifier token.
      if in_identifier:
        identifier += c
      else:
        in_identifier = True
        identifier_start = column
        identifier = c
    else:
      # Any other character ends a pending identifier; flush it first.
      if in_identifier:
        line_contexts.append(TokenContext(row, identifier_start, identifier))
        in_identifier = False
      if not c.isspace() or (tokenize_whitespace and c != '\n'):
        line_contexts.append(TokenContext(row, column, c))
    if c == '\n':
      row += 1
      column = 0
      contexts.append(line_contexts)
      # BUG FIX: removed the unused leftover `line_tokens = []` assignment.
      line_contexts = []
    else:
      column += 1
  contexts.append(line_contexts)
  return contexts
def compute_unified_diff(old_tokens, new_tokens):
  """Diffs two sequences of token strings.

  Args:
    old_tokens: Token strings corresponding to the old data.
    new_tokens: Token strings corresponding to the new data.

  Returns:
    A generator of unified-diff lines with zero context lines and no
    trailing line terminators.
  """
  return difflib.unified_diff(
      old_tokens, new_tokens, n=0, lineterm='')
def parse_chunk_header_file_range(file_range):
"""Parses a chunk header file range.
Diff chunk headers have the form:
@@ -<file-range> +<file-range> @@
File ranges have the form:
<start line number>,<number of lines changed>
Args:
file_range: A chunk header file range.
Returns:
A tuple (range_start, range_end). The endpoints are adjusted such that
iterating over [range_start, range_end) will give the changed indices.
"""
if ',' in file_range:
file_range_parts = file_range.split(',')
start = int(file_range_parts[0])
amount = int(file_range_parts[1])
if amount == 0:
return (start, start)
return (start - 1, start + amount - 1)
else:
return (int(file_range) - 1, int(file_range))
def compute_changed_token_indices(previous_tokens, current_tokens):
  """Computes changed and added tokens.
  Args:
    previous_tokens: Tokens corresponding to the old file.
    current_tokens: Tokens corresponding to the new file.
  Returns:
    A tuple (added_tokens, changed_tokens).
      added_tokens: A list of indices into |current_tokens|.
      changed_tokens: A map of indices into |current_tokens| to
        indices into |previous_tokens|.
  """
  # Walk the zero-context unified diff of the two token streams.  Tokens
  # inside '+' ranges are "added"; tokens between hunks are unchanged and
  # are mapped back to their old indices with matching offsets.
  # End of the previous hunk in the current (new) token stream.
  prev_file_chunk_end = 0
  # End of the previous hunk in the previous (old) token stream.
  prev_patched_chunk_end = 0
  added_tokens = []
  changed_tokens = {}
  for line in compute_unified_diff(previous_tokens, current_tokens):
    if line.startswith("@@"):
      parts = line.split(' ')
      removed = parts[1].lstrip('-')
      removed_start, removed_end = parse_chunk_header_file_range(removed)
      added = parts[2].lstrip('+')
      added_start, added_end = parse_chunk_header_file_range(added)
      for i in range(added_start, added_end):
        added_tokens.append(i)
      # The run of tokens between the previous hunk and this one is
      # unchanged; map each to its position in the old stream.
      for i in range(0, removed_start - prev_patched_chunk_end):
        changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
      prev_patched_chunk_end = removed_end
      prev_file_chunk_end = added_end
  # Map the unchanged tail after the final hunk.
  for i in range(0, len(previous_tokens) - prev_patched_chunk_end):
    changed_tokens[prev_file_chunk_end + i] = prev_patched_chunk_end + i
  return added_tokens, changed_tokens
def flatten_nested_list(l):
  """Flattens a list of lists and records where each element came from.

  Args:
    l: A list of lists.

  Returns:
    A tuple (flattened, index_to_position):
      flattened: The flattened list.
      index_to_position: A dict mapping each flat index i to a pair
        (r, c) such that flattened[i] == l[r][c].
  """
  flattened = []
  index_to_position = {}
  for r, nested_list in enumerate(l):
    for c, element in enumerate(nested_list):
      index_to_position[len(flattened)] = (r, c)
      flattened.append(element)
  return (flattened, index_to_position)
def compute_changed_token_positions(previous_tokens, current_tokens):
  """Computes changed and added token positions.
  Args:
    previous_tokens: A list of lists of token strings. Lines in the file
      correspond to the nested lists.
    current_tokens: A list of lists of token strings. Lines in the file
      correspond to the nested lists.
  Returns:
    A tuple (added_token_positions, changed_token_positions):
      added_token_positions: A list of pairs that index into |current_tokens|.
      changed_token_positions: A map from pairs that index into
        |current_tokens| to pairs that index into |previous_tokens|.
  """
  # Diff the flattened token streams, then translate the resulting flat
  # indices back into (row, column) pairs via the index->position maps.
  flat_previous_tokens, previous_index_to_position = flatten_nested_list(
      previous_tokens)
  flat_current_tokens, current_index_to_position = flatten_nested_list(
      current_tokens)
  added_indices, changed_indices = compute_changed_token_indices(
      flat_previous_tokens, flat_current_tokens)
  added_token_positions = [current_index_to_position[i] for i in added_indices]
  changed_token_positions = {
      current_index_to_position[current_i]:
      previous_index_to_position[changed_indices[current_i]]
      for current_i in changed_indices
  }
  return (added_token_positions, changed_token_positions)
def parse_chunks_from_diff(diff):
  """Returns a generator of chunk data from a diff.
  Args:
    diff: A list of strings, with each string being a line from a diff
      in unified diff format.
  Returns:
    A generator of tuples (added_lines_start, added_lines_end, removed_lines)
  """
  it = iter(diff)
  for line in it:
    # Skip ahead to the next '@@' chunk header.
    # NOTE: it.next() is Python 2 syntax (next(it) on Python 3); running
    # off the end raises StopIteration, which ends this generator.
    while not line.startswith('@@'):
      line = it.next()
    parts = line.split(' ')
    previous_start, previous_end = parse_chunk_header_file_range(
        parts[1].lstrip('-'))
    current_start, current_end = parse_chunk_header_file_range(
        parts[2].lstrip('+'))
    # Walk the chunk body, grouping each contiguous run of +/- lines
    # into a single delta.
    in_delta = False
    added_lines_start = None
    added_lines_end = None
    removed_lines = []
    while previous_start < previous_end or current_start < current_end:
      line = it.next()
      firstchar = line[0]
      line = line[1:]
      if not in_delta and (firstchar == '-' or firstchar == '+'):
        in_delta = True
        added_lines_start = current_start
        added_lines_end = current_start
        removed_lines = []
      if firstchar == '-':
        removed_lines.append(line)
        previous_start += 1
      elif firstchar == '+':
        current_start += 1
        added_lines_end = current_start
      elif firstchar == ' ':
        # A context line closes the current delta, if one is open.
        if in_delta:
          in_delta = False
          yield (added_lines_start, added_lines_end, removed_lines)
        previous_start += 1
        current_start += 1
    if in_delta:
      yield (added_lines_start, added_lines_end, removed_lines)
def should_skip_commit(commit):
  """Decides if |commit| should be skipped when computing the blame.

  Commit 5d4451e deleted all files in the repo except for DEPS.  The
  next commit, 1e7896, brought them back.  This is a hack to skip
  those commits (except for the files they modified).  If we did not
  do this, changes would be incorrectly attributed to 1e7896.

  Args:
    commit: A Commit object.

  Returns:
    A boolean indicating if this commit should be skipped.
  """
  banned_commits = (
      '1e78967ed2f1937b3809c19d91e7dd62d756d307',
      '5d4451ebf298d9d71f716cc0135f465cec41fcd0',
  )
  if commit.hash not in banned_commits:
    return False
  exempt_files = (
      'DEPS',
      'chrome/browser/ui/views/file_manager_dialog_browsertest.cc',
  )
  # Scan the diff: a file header naming an exempt file keeps the commit;
  # reaching hunk data ('@@') before that means the commit is skipped.
  for line in commit.diff:
    if line.startswith('---') or line.startswith('+++'):
      if line.split(' ')[1] in exempt_files:
        return False
    elif line.startswith('@@'):
      return True
  assert False
def generate_substrings(file):
  """Generates substrings from a file stream, splitting on NUL ('\\0').

  For example, the input:
    'a\\0bc\\0\\0\\0d\\0'
  would produce the output:
    ['a', 'bc', 'd']
  Empty substrings are never yielded.

  Args:
    file: A readable file.
  """
  CHUNK_SIZE = 448  # Experimentally found to be pretty fast.
  pending = []  # pieces of the substring spanning chunk boundaries
  while True:
    chunk = file.read(CHUNK_SIZE)
    pieces = chunk.split('\0')
    pending.append(pieces[0])
    if len(pieces) > 1:
      # At least one NUL in this chunk: the pending pieces are complete.
      head = ''.join(pending)
      if head:
        yield head
      # Interior pieces are whole substrings already.
      for piece in pieces[1:-1]:
        if piece:
          yield piece
      pending = [pieces[-1]]
    if len(chunk) < CHUNK_SIZE:
      # Short read means end of stream; flush whatever is left.
      tail = ''.join(pending)
      if tail:
        yield tail
      return
def generate_commits(git_log_stdout):
  """Parses git log output into a stream of Commit objects.
  """
  # Fields arrive NUL-separated in the order produced by the --format
  # string built in uberblame(): hash, author name, author email, author
  # date, commit body, then the unified diff.
  substring_generator = generate_substrings(git_log_stdout)
  while True:
    # NOTE: generator.next() is Python 2 syntax; on Python 3 this loop
    # would need next(...) and an explicit StopIteration guard.
    hash = substring_generator.next()
    author_name = substring_generator.next()
    author_email = substring_generator.next()
    author_date = substring_generator.next()
    message = substring_generator.next().rstrip('\n')
    # Drop the diff's first line (the 'diff --git'/blank preamble) and
    # the trailing empty element produced by the final newline.
    diff = substring_generator.next().split('\n')[1:-1]
    yield Commit(hash, author_name, author_email, author_date, message, diff)
def uberblame_aux(file_name, git_log_stdout, data, tokenization_method):
  """Computes the uberblame of file |file_name|.
  Args:
    file_name: File to uberblame.
    git_log_stdout: A file object that represents the git log output.
    data: A string containing the data of file |file_name|.
    tokenization_method: A function that takes a string and returns a list of
      TokenContexts.
  Returns:
    A tuple (data, blame).
      data: File contents.
      blame: A list of TokenContexts.
  """
  blame = tokenization_method(data)
  blamed_tokens = 0
  total_tokens = len(blame)
  # |uber_blame| shares the TokenContext objects with |blame|; they are
  # annotated in place with their commit as history is walked below.
  uber_blame = (data, blame[:])
  # History arrives newest-first.  For each commit, rewind |blame| to its
  # pre-commit state: tokens the commit introduced get attributed to it,
  # and the removed (older) lines take their place for the next iteration.
  for commit in generate_commits(git_log_stdout):
    if should_skip_commit(commit):
      continue
    offset = 0
    for (added_lines_start, added_lines_end,
         removed_lines) in parse_chunks_from_diff(commit.diff):
      # Earlier chunks of this same commit may have grown or shrunk
      # |blame|; |offset| re-aligns this chunk's line numbers.
      added_lines_start += offset
      added_lines_end += offset
      previous_contexts = [
          token_lines
          for line_previous in removed_lines
          for token_lines in tokenization_method(line_previous)
      ]
      previous_tokens = [[context.token for context in contexts]
                         for contexts in previous_contexts]
      current_contexts = blame[added_lines_start:added_lines_end]
      current_tokens = [[context.token for context in contexts]
                        for contexts in current_contexts]
      added_token_positions, changed_token_positions = (
          compute_changed_token_positions(previous_tokens, current_tokens))
      for r, c in added_token_positions:
        current_contexts[r][c].commit = commit
        blamed_tokens += 1
      # Unchanged tokens survive the rewind: carry their TokenContext
      # objects back into the older line structure.
      for r, c in changed_token_positions:
        pr, pc = changed_token_positions[(r, c)]
        previous_contexts[pr][pc] = current_contexts[r][c]
      assert added_lines_start <= added_lines_end <= len(blame)
      current_blame_size = len(blame)
      blame[added_lines_start:added_lines_end] = previous_contexts
      offset += len(blame) - current_blame_size
  # After unwinding all of history, no token content should remain.
  assert blame == [] or blame == [[]]
  return uber_blame
def uberblame(file_name, revision, tokenization_method):
  """Computes the uberblame of file |file_name|.
  Args:
    file_name: File to uberblame.
    revision: The revision to start the uberblame at.
    tokenization_method: A function that takes a string and returns a list of
      TokenContexts.
  Returns:
    A tuple (data, blame).
      data: File contents.
      blame: A list of TokenContexts.
  """
  DIFF_CONTEXT = 3
  # -z plus the NUL-separated --format fields below are what
  # generate_commits() expects to parse from the stream.
  cmd_git_log = [
      'git', 'log', '--minimal', '--no-prefix', '--follow', '-m',
      '--first-parent', '-p',
      '-U%d' % DIFF_CONTEXT, '-z', '--format=%x00%H%x00%an%x00%ae%x00%ad%x00%B',
      revision, '--', file_name
  ]
  # Stream the log so parsing can start before git finishes.
  git_log = subprocess.Popen(
      cmd_git_log, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  data = subprocess.check_output(
      ['git', 'show', '%s:%s' % (revision, file_name)])
  data, blame = uberblame_aux(file_name, git_log.stdout, data,
                              tokenization_method)
  # Drain/close the pipes and surface a git failure as an exception.
  _, stderr = git_log.communicate()
  if git_log.returncode != 0:
    raise subprocess.CalledProcessError(git_log.returncode, cmd_git_log, stderr)
  return data, blame
def generate_pastel_color():
  """Generates a random color from a nice looking pastel palette.

  Returns:
    The color, formatted as hex string.  For example, white is "#FFFFFF".
  """
  # Draw hue/lightness/saturation in this exact order so RNG consumption
  # (and therefore seeded output) is unchanged.
  hue = random.uniform(0, 1)
  lightness = random.uniform(0.8, 0.9)
  saturation = random.uniform(0.5, 1)
  red, green, blue = colorsys.hls_to_rgb(hue, lightness, saturation)
  return "#%0.2X%0.2X%0.2X" % (
      int(red * 255), int(green * 255), int(blue * 255))
def colorize_diff(diff):
  """Colorizes a diff for use in an HTML page.

  Args:
    diff: The diff, in unified diff format, as a list of line strings.

  Returns:
    The HTML-formatted diff, as a string.  The diff will already be
    escaped.  The quotes around class names are backslash-escaped because
    the result ends up inside a JavaScript string literal.
  """
  colorized = []
  for line in diff:
    # BUG FIX: cgi.escape() was removed in Python 3.8; html.escape() is
    # the drop-in replacement (it additionally escapes single quotes,
    # which is harmless in this context).
    escaped = html.escape(line.replace('\r', ''), quote=True)
    if line.startswith('+'):
      colorized.append('<span class=\\"addition\\">%s</span>' % escaped)
    elif line.startswith('-'):
      colorized.append('<span class=\\"deletion\\">%s</span>' % escaped)
    elif line.startswith('@@'):
      # Split the '@@ ... @@' chunk metadata from the trailing context.
      context_begin = escaped.find('@@', 2)
      assert context_begin != -1
      colorized.append(
          '<span class=\\"chunk_meta\\">%s</span>'
          # BUG FIX: the chunk_context span was missing its closing '>'.
          '<span class=\\"chunk_context\\">%s</span>'
          % (escaped[0:context_begin + 2], escaped[context_begin + 2:]))
    elif line.startswith('diff') or line.startswith('index'):
      colorized.append('<span class=\\"file_header\\">%s</span>' % escaped)
    else:
      colorized.append('<span class=\\"context_line\\">%s</span>' % escaped)
  return '\n'.join(colorized)
def create_visualization(data, blame):
  """Creates a web page to visualize |blame|.
  Args:
    data: The data file as returned by uberblame().
    blame: A list of TokenContexts as returned by uberblame().
  Returns:
    The HTML for the generated page, as a string.
  """
  # Use the same seed for the color generator on each run so that
  # loading the same blame of the same file twice will result in the
  # same generated HTML page.
  random.seed(0x52937865ec62d1ea)
  # The three %s placeholders are filled at the end with: the JS
  # commit_data object, the line-number column, and the blame HTML.
  html = """\
  <html>
  <head>
  <style>
  body {
  font-family: monospace;
  }
  pre {
  display: inline;
  }
  .token {
  outline: 1pt solid #00000030;
  outline-offset: -1pt;
  cursor: pointer;
  }
  .addition {
  color: #080;
  }
  .deletion {
  color: #c00;
  }
  .chunk_meta {
  color: #099;
  }
  .context_line .chunk_context {
  // Just normal text.
  }
  .file_header {
  font-weight: bold;
  }
  #linenums {
  text-align: right;
  }
  #file_display {
  position: absolute;
  left: 0;
  top: 0;
  width: 50%%;
  height: 100%%;
  overflow: scroll;
  }
  #commit_display_container {
  position: absolute;
  left: 50%%;
  top: 0;
  width: 50%%;
  height: 100%%;
  overflow: scroll;
  }
  </style>
  <script>
  commit_data = %s;
  function display_commit(hash) {
  var e = document.getElementById("commit_display");
  e.innerHTML = commit_data[hash]
  }
  </script>
  </head>
  <body>
  <div id="file_display">
  <table>
  <tbody>
  <tr>
  <td valign="top" id="linenums">
  <pre>%s</pre>
  </td>
  <td valign="top">
  <pre>%s</pre>
  </td>
  </tr>
  </tbody>
  </table>
  </div>
  <div id="commit_display_container" valign="top">
  <pre id="commit_display" />
  </div>
  </body>
  </html>
  """
  html = textwrap.dedent(html)
  commits = {}
  lines = []
  commit_colors = {}
  blame_index = 0
  # Flatten the per-line token lists; tokens are then visited in file
  # order while the characters of |data| are streamed out.
  blame = [context for contexts in blame for context in contexts]
  row = 0
  lastline = ''
  for line in data.split('\n'):
    lastline = line
    column = 0
    for c in line + '\n':
      # Close the open <span> when we step past the end of a token and
      # the next token belongs to a different commit.
      if blame_index < len(blame):
        token_context = blame[blame_index]
        if (row == token_context.row and
            column == token_context.column + len(token_context.token)):
          if (blame_index + 1 == len(blame) or blame[blame_index].commit.hash !=
              blame[blame_index + 1].commit.hash):
            lines.append('</span>')
          blame_index += 1
      # Open a new <span> when a token from a different commit starts.
      if blame_index < len(blame):
        token_context = blame[blame_index]
        if row == token_context.row and column == token_context.column:
          if (blame_index == 0 or blame[blame_index - 1].commit.hash !=
              blame[blame_index].commit.hash):
            # NOTE: `hash` shadows the builtin of the same name.
            hash = token_context.commit.hash
            commits[hash] = token_context.commit
            if hash not in commit_colors:
              commit_colors[hash] = generate_pastel_color()
            color = commit_colors[hash]
            # NOTE(review): the nested double quotes produce
            # onclick="display_commit("...")", which is not valid HTML
            # attribute syntax -- probably should be &quot; -- confirm.
            lines.append(('<span class="token" style="background-color: %s" ' +
                          'onclick="display_commit("%s")">') % (color,
                                                                hash))
      # NOTE(review): cgi.escape() only exists on Python < 3.8; this
      # script targets Python 2.
      lines.append(cgi.escape(c))
      column += 1
    row += 1
  commit_data = ['{\n']
  commit_display_format = """\
  commit: {hash}
  Author: {author_name} <{author_email}>
  Date: {author_date}
  {message}
  """
  commit_display_format = textwrap.dedent(commit_display_format)
  links = re.compile(r'(https?:\/\/\S+)')
  # Build the JS object literal mapping commit hash -> HTML description.
  for hash in commits:
    commit = commits[hash]
    commit_display = commit_display_format.format(
        hash=hash,
        author_name=commit.author_name,
        author_email=commit.author_email,
        author_date=commit.author_date,
        message=commit.message)
    commit_display = cgi.escape(commit_display, quote=True)
    commit_display += colorize_diff(commit.diff)
    # Linkify URLs; the escaped quotes keep the JS string literal valid.
    commit_display = re.sub(links, '<a href=\\"\\1\\">\\1</a>', commit_display)
    commit_display = commit_display.replace('\n', '\\n')
    commit_data.append('"%s": "%s",\n' % (hash, commit_display))
  commit_data.append('}')
  commit_data = ''.join(commit_data)
  # A trailing newline yields an empty final line; don't number it.
  line_nums = range(1, row if lastline.strip() == '' else row + 1)
  line_nums = '\n'.join([str(num) for num in line_nums])
  lines = ''.join(lines)
  return html % (commit_data, line_nums, lines)
def show_visualization(html):
  """Display |html| in a web browser.
  Args:
    html: The contents of the file to display, as a string.
  """
  # Keep the temporary file around so the browser has time to open it.
  # TODO(thomasanderson): spin up a temporary web server to serve this
  # file so we don't have to leak it.
  html_file = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
  html_file.write(html)
  html_file.flush()
  if sys.platform.startswith('linux'):
    # Don't show any messages when starting the browser.
    saved_stdout = os.dup(1)
    saved_stderr = os.dup(2)
    os.close(1)
    os.close(2)
    # Re-open fds 1 and 2 onto /dev/null; open() reuses the lowest
    # available descriptors, i.e. the two just closed.
    os.open(os.devnull, os.O_RDWR)
    os.open(os.devnull, os.O_RDWR)
  webbrowser.open('file://' + html_file.name)
  if sys.platform.startswith('linux'):
    # Restore the real stdout/stderr.
    os.dup2(saved_stdout, 1)
    os.dup2(saved_stderr, 2)
    os.close(saved_stdout)
    os.close(saved_stderr)
def main(argv):
  """Parses |argv| and runs the full uberblame pipeline.

  Returns the process exit code (0 on success).
  """
  arg_parser = argparse.ArgumentParser(
      description='Show what revision last modified each token of a file.')
  arg_parser.add_argument(
      'revision',
      default='HEAD',
      nargs='?',
      help='show only commits starting from a revision')
  arg_parser.add_argument('file', help='the file to uberblame')
  arg_parser.add_argument(
      '--skip-visualization',
      action='store_true',
      help='do not display the blame visualization in a web browser')
  arg_parser.add_argument(
      '--tokenize-by-char',
      action='store_true',
      help='treat individual characters as tokens')
  arg_parser.add_argument(
      '--tokenize-whitespace',
      action='store_true',
      help='also blame non-newline whitespace characters')
  options = arg_parser.parse_args(argv)

  # Bind the tokenization flags once so downstream code only deals with
  # a single-argument callable.
  def tokenizer(data):
    return tokenize_data(data, options.tokenize_by_char,
                         options.tokenize_whitespace)

  data, blame = uberblame(options.file, options.revision, tokenizer)
  page = create_visualization(data, blame)
  if not options.skip_visualization:
    show_visualization(page)
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "e053f0d18a9f6d88e412d4757ea7243c",
"timestamp": "",
"source": "github",
"line_count": 722,
"max_line_length": 80,
"avg_line_length": 30.918282548476455,
"alnum_prop": 0.6219594140572504,
"repo_name": "ric2b/Vivaldi-browser",
"id": "89b6d88b333b1b0010a8e04fa205d66b15d9ecee",
"size": "22508",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chromium/tools/uberblame.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Regexp-based split and replace using the obsolete regex module.
This module is only for backward compatibility. These operations
are now provided by the new regular expression module, "re".
sub(pat, repl, str): replace first occurrence of pattern in string
gsub(pat, repl, str): replace all occurrences of pattern in string
split(str, pat, maxsplit): split string using pattern as delimiter
splitx(str, pat, maxsplit): split string using pattern as delimiter plus
return delimiters
"""
import warnings
warnings.warn("the regsub module is deprecated; please use re.sub()",
DeprecationWarning)
# Ignore further deprecation warnings about this module
warnings.filterwarnings("ignore", "", DeprecationWarning, __name__)
import regex
__all__ = ["sub","gsub","split","splitx","capwords"]
# Replace first occurrence of pattern pat in string str by replacement
# repl. If the pattern isn't found, the string is returned unchanged.
# The replacement may contain references \digit to subpatterns and
# escaped backslashes. The pattern may be a string or an already
# compiled pattern.
def sub(pat, repl, str):
    """Replace the first occurrence of `pat` in `str` with `repl`.

    The replacement may contain \\digit references to subpatterns and
    escaped backslashes.  `pat` may be a pattern string or an already
    compiled pattern; the string is returned unchanged when there is no
    match.
    """
    prog = compile(pat)
    # Obsolete regex-module API: search() returns the match position
    # (negative on failure) and leaves the group spans in prog.regs.
    if prog.search(str) >= 0:
        regs = prog.regs
        a, b = regs[0]
        str = str[:a] + expand(repl, regs, str) + str[b:]
    return str
# Replace all (non-overlapping) occurrences of pattern pat in string
# str by replacement repl. The same rules as for sub() apply.
# Empty matches for the pattern are replaced only when not adjacent to
# a previous match, so e.g. gsub('', '-', 'abc') returns '-a-b-c-'.
def gsub(pat, repl, str):
    """Replace all non-overlapping occurrences of `pat` in `str`.

    The same rules as for sub() apply.  Empty matches are replaced only
    when not adjacent to a previous match, so e.g. gsub('', '-', 'abc')
    returns '-a-b-c-'.
    """
    prog = compile(pat)
    new = ''
    start = 0
    first = 1
    # Obsolete regex-module API: search() returns the match position
    # (negative on failure) and leaves the group spans in prog.regs.
    while prog.search(str, start) >= 0:
        regs = prog.regs
        a, b = regs[0]
        if a == b == start and not first:
            # Empty match butting against the previous one: retry one
            # character further on so the loop always makes progress.
            if start >= len(str) or prog.search(str, start+1) < 0:
                break
            regs = prog.regs
            a, b = regs[0]
        new = new + str[start:a] + expand(repl, regs, str)
        start = b
        first = 0
    new = new + str[start:]
    return new
# Split string str in fields separated by delimiters matching pattern
# pat. Only non-empty matches for the pattern are considered, so e.g.
# split('abc', '') returns ['abc'].
# The optional 3rd argument sets the number of splits that are performed.
def split(str, pat, maxsplit=0):
    """Split `str` on non-empty matches of `pat`.

    Only non-empty matches count as delimiters, so e.g. split('abc', '')
    returns ['abc'].  A non-zero `maxsplit` caps the number of splits.
    """
    return intsplit(str, pat, maxsplit, retain=0)
# Split string str in fields separated by delimiters matching pattern
# pat. Only non-empty matches for the pattern are considered, so e.g.
# split('abc', '') returns ['abc']. The delimiters are also included
# in the list.
# The optional 3rd argument sets the number of splits that are performed.
def splitx(str, pat, maxsplit=0):
    """Like split(), but the delimiter matches are kept in the result,
    interleaved with the fields."""
    return intsplit(str, pat, maxsplit, retain=1)
# Internal function used to implement split() and splitx().
def intsplit(str, pat, maxsplit, retain):
    """Shared implementation of split() and splitx().

    When `retain` is true the delimiter matches are included in the
    returned list between the fields.
    """
    prog = compile(pat)
    res = []
    start = next = 0
    splitcount = 0
    # Obsolete regex-module API: search() returns the match position
    # (negative on failure) and leaves the group spans in prog.regs.
    while prog.search(str, next) >= 0:
        regs = prog.regs
        a, b = regs[0]
        if a == b:
            # Empty match: advance one character so the scan makes
            # progress; empty matches never produce a split.
            next = next + 1
            if next >= len(str):
                break
        else:
            res.append(str[start:a])
            if retain:
                res.append(str[a:b])
            start = next = b
            splitcount = splitcount + 1
            if (maxsplit and (splitcount >= maxsplit)):
                break
    # The remainder after the last delimiter is always the final field.
    res.append(str[start:])
    return res
# Capitalize words split using a pattern
def capwords(str, pat='[^a-zA-Z0-9_]+'):
    """Capitalize every word of `str`, splitting on `pat` delimiters."""
    pieces = splitx(str, pat)
    # splitx() interleaves words (even indices) with delimiters (odd
    # indices); capitalize only the words, then stitch it all back.
    pieces[0::2] = [word.capitalize() for word in pieces[0::2]]
    return "".join(pieces)
# Internal subroutines:
# compile(pat): compile a pattern, caching already compiled patterns
# expand(repl, regs, str): expand \digit escapes in replacement string
# Manage a cache of compiled regular expressions.
#
# If the pattern is a string a compiled version of it is returned. If
# the pattern has been used before we return an already compiled
# version from the cache; otherwise we compile it now and save the
# compiled version in the cache, along with the syntax it was compiled
# with. Instead of a string, a compiled regular expression can also
# be passed.
# Cache of compiled patterns, keyed on (pattern string, syntax mode).
cache = {}
def compile(pat):
    """Return a compiled regex for `pat`, caching compiled patterns.

    Already-compiled patterns are passed through unchanged.  The cache
    key includes regex.get_syntax() because the same pattern string can
    compile differently under different syntax modes.
    NOTE: deliberately shadows the builtin compile() inside this module.
    """
    if type(pat) != type(''):
        return pat # Assume it is a compiled regex
    key = (pat, regex.get_syntax())
    if key in cache:
        prog = cache[key] # Get it from the cache
    else:
        prog = cache[key] = regex.compile(pat)
    return prog
def clear_cache():
    """Drop every cached compiled pattern.

    Useful after regex.set_syntax() changes, since cache entries are
    keyed on the syntax mode they were compiled under.
    """
    global cache
    cache = {}
# Expand \digit in the replacement.
# Each occurrence of \digit is replaced by the substring of str
# indicated by regs[digit]. To include a literal \ in the
# replacement, double it; other \ escapes are left unchanged (i.e.
# the \ and the following character are both copied).
def expand(repl, regs, str):
    """Expand \\digit escapes in the replacement string `repl`.

    Each occurrence of \\digit is replaced by the substring of `str`
    indicated by regs[digit].  A doubled backslash collapses to a single
    one; any other backslash escape is copied through unchanged (the
    backslash and the following character are both kept).
    """
    if '\\' not in repl:
        return repl
    # Accumulate pieces in a list and join once at the end instead of
    # repeatedly concatenating strings (which is quadratic).
    pieces = []
    i = 0
    while i < len(repl):
        c = repl[i]
        i = i + 1
        if c != '\\' or i >= len(repl):
            # Ordinary character, or a lone trailing backslash.
            pieces.append(c)
        else:
            c = repl[i]
            i = i + 1
            if '0' <= c <= '9':
                a, b = regs[ord(c) - ord('0')]
                pieces.append(str[a:b])
            elif c == '\\':
                pieces.append(c)
            else:
                pieces.append('\\' + c)
    return ''.join(pieces)
# Test program, reads sequences "pat repl str" from stdin.
# Optional argument specifies pattern used to split lines.
def test():
    """Interactive driver: reads "pat repl str" triples from stdin and
    prints the sub()/gsub() results (Python 2 only -- print statements).

    An optional command-line argument overrides the pattern used to
    split each input line into the three fields.
    """
    import sys
    if sys.argv[1:]:
        delpat = sys.argv[1]
    else:
        delpat = '[ \t\n]+'
    while 1:
        # Only prompt when stdin is a terminal.
        if sys.stdin.isatty(): sys.stderr.write('--> ')
        line = sys.stdin.readline()
        if not line: break
        if line[-1] == '\n': line = line[:-1]
        fields = split(line, delpat)
        if len(fields) != 3:
            print 'Sorry, not three fields'
            print 'split:', repr(fields)
            continue
        [pat, repl, str] = split(line, delpat)
        print 'sub :', repr(sub(pat, repl, str))
        print 'gsub:', repr(gsub(pat, repl, str))
| {
"content_hash": "87a638acb69f72560601c0a118dc84ae",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 73,
"avg_line_length": 31.328282828282827,
"alnum_prop": 0.60422376269547,
"repo_name": "loongson-community/EFI-MIPS",
"id": "0fc10a5f6838ba605af1d672905e5470f5bd41db",
"size": "6203",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ToolKit/cmds/python/Lib/regsub.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "271282"
},
{
"name": "Batchfile",
"bytes": "318"
},
{
"name": "C",
"bytes": "32642014"
},
{
"name": "C++",
"bytes": "1058125"
},
{
"name": "CSS",
"bytes": "2547"
},
{
"name": "GAP",
"bytes": "111381"
},
{
"name": "Groff",
"bytes": "1245691"
},
{
"name": "HTML",
"bytes": "1328432"
},
{
"name": "Lex",
"bytes": "14559"
},
{
"name": "M",
"bytes": "748"
},
{
"name": "Makefile",
"bytes": "468567"
},
{
"name": "Mask",
"bytes": "3420"
},
{
"name": "NSIS",
"bytes": "8743"
},
{
"name": "Objective-C",
"bytes": "3415447"
},
{
"name": "Pascal",
"bytes": "3368"
},
{
"name": "Python",
"bytes": "7763565"
},
{
"name": "R",
"bytes": "546"
},
{
"name": "Shell",
"bytes": "10084"
},
{
"name": "Yacc",
"bytes": "30661"
}
],
"symlink_target": ""
} |
'''
Build a neural machine translation model with soft attention
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import ipdb
import numpy
import copy
import os
import warnings
import sys
import time
from collections import OrderedDict
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
    """Copy every numpy value in `params` into the matching Theano
    shared variable in `tparams` (in place; keys must match)."""
    for kk, vv in params.iteritems():
        tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
    """Snapshot Theano shared variables into an OrderedDict of their
    current numpy values (same keys, same order)."""
    return OrderedDict((kk, vv.get_value())
                       for kk, vv in zipped.iteritems())
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
    """List of parameter variables in insertion order (tparams must be
    an OrderedDict for the order to be meaningful)."""
    return [value for _name, value in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
    # Dropout with p=0.5. When use_noise==1 (training) multiply by
    # Bernoulli(0.5) noise; otherwise (test) scale by the keep
    # probability 0.5 so the expected activation matches training.
    proj = tensor.switch(
        use_noise,
        state_before * trng.binomial(state_before.shape, p=0.5, n=1,
                                     dtype=state_before.dtype),
        state_before * 0.5)
    return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
    """Wrap each numpy parameter in a named Theano shared variable.

    Returns a new OrderedDict with the same keys (order preserved so
    itemlist()/grads stay aligned)."""
    tparams = OrderedDict()
    for kk, pp in params.iteritems():
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams
# load parameters
def load_params(path, params):
    """Reload parameter values from a numpy ``.npz`` archive.

    Each key of `params` that exists in the archive is replaced by the
    stored array; keys missing from the archive are left untouched and
    reported with a warning (tolerant reload). Returns the mutated
    `params` dict.
    """
    pp = numpy.load(path)
    # Iterate over keys only: the original looped over iteritems() but
    # never used the value, and the py2-only dict API was unnecessary.
    # list() guards against the view changing while values are replaced.
    for kk in list(params.keys()):
        if kk not in pp:
            warnings.warn('%s is not in the archive' % kk)
            continue
        params[kk] = pp[kk]

    return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
          'gru': ('param_init_gru', 'gru_layer'),
          'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
          }


def get_layer(name):
    """Resolve a layer type name to its (param_init_fn, apply_fn) pair.

    Raises KeyError for an unknown layer name. The function names in
    `layers` are looked up in module globals instead of being eval()'d:
    equivalent for these fixed identifiers, but avoids evaluating
    arbitrary strings.
    """
    init_name, apply_name = layers[name]
    return (globals()[init_name], globals()[apply_name])
# some utilities
def ortho_weight(ndim):
    """Random (ndim x ndim) orthogonal matrix as float32.

    Obtained as the left singular vectors of a Gaussian random matrix."""
    gaussian = numpy.random.randn(ndim, ndim)
    u = numpy.linalg.svd(gaussian)[0]
    return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
    """Random (nin x nout) weight matrix as float32.

    Square matrices are orthogonal when `ortho` is set; otherwise the
    entries are scaled Gaussian noise."""
    if nout is None:
        nout = nin
    use_ortho = ortho and nout == nin
    W = ortho_weight(nin) if use_ortho \
        else scale * numpy.random.randn(nin, nout)
    return W.astype('float32')
def tanh(x):
    # Elementwise tanh; referenced by name via fflayer's `activ` string.
    return tensor.tanh(x)
def linear(x):
    # Identity activation; referenced by name via fflayer's `activ` string.
    return x
def concatenate(tensor_list, axis=0):
    """
    Alternative implementation of `theano.tensor.concatenate`.
    This function does exactly the same thing, but contrary to Theano's own
    implementation, the gradient is implemented on the GPU.
    Backpropagating through `theano.tensor.concatenate` yields slowdowns
    because the inverse operation (splitting) needs to be done on the CPU.
    This implementation does not have that problem.
    :usage:
        >>> x, y = theano.tensor.matrices('x', 'y')
        >>> c = concatenate([x, y], axis=1)
    :parameters:
        - tensor_list : list
            list of Theano tensor expressions that should be concatenated.
        - axis : int
            the tensors will be joined along this axis.
    :returns:
        - out : tensor
            the concatenated tensor expression.
    """
    # Total (symbolic) extent of the joined axis.
    concat_size = sum(tt.shape[axis] for tt in tensor_list)

    # Output shape matches the first input everywhere except `axis`.
    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range(axis + 1, tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)

    # Copy each input into its slab of a zero tensor via set_subtensor,
    # advancing `offset` along the concatenation axis.
    out = tensor.zeros(output_shape)
    offset = 0
    for tt in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, offset + tt.shape[axis]),)
        for k in range(axis + 1, tensor_list[0].ndim):
            indices += (slice(None),)

        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]

    return out
# batch preparation
def prepare_data(seqs_x, seqs_y, maxlen=None, n_words_src=30000,
                 n_words=30000):
    """Pad a minibatch of source/target word-index sequences.

    Returns time-major int64 arrays `x`, `y` plus float32 masks
    `x_mask`, `y_mask` as (x, x_mask, y, y_mask); each array has one
    extra (zero / EOS) time step beyond the longest sequence, with the
    mask covering that extra step. If filtering by `maxlen` removes
    every pair, returns (None, None, None, None).
    (`n_words_src` / `n_words` are accepted for interface compatibility
    but not used here.)
    """
    lengths_x = [len(s) for s in seqs_x]
    lengths_y = [len(s) for s in seqs_y]

    if maxlen is not None:
        # Keep only pairs where BOTH sides are strictly shorter than maxlen.
        kept = [(lx, sx, ly, sy)
                for lx, sx, ly, sy
                in zip(lengths_x, seqs_x, lengths_y, seqs_y)
                if lx < maxlen and ly < maxlen]
        if kept:
            lengths_x, seqs_x, lengths_y, seqs_y = \
                (list(col) for col in zip(*kept))
        else:
            lengths_x, seqs_x, lengths_y, seqs_y = [], [], [], []

    if len(lengths_x) < 1 or len(lengths_y) < 1:
        return None, None, None, None

    n_samples = len(seqs_x)
    maxlen_x = numpy.max(lengths_x) + 1   # +1 leaves room for the EOS step
    maxlen_y = numpy.max(lengths_y) + 1

    x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
    y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
    x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
    y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
    for col, (s_x, s_y) in enumerate(zip(seqs_x, seqs_y)):
        x[:lengths_x[col], col] = s_x
        x_mask[:lengths_x[col] + 1, col] = 1.   # mask includes the EOS step
        y[:lengths_y[col], col] = s_y
        y_mask[:lengths_y[col] + 1, col] = 1.

    return x, x_mask, y, y_mask
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None,
                       ortho=True):
    """Allocate feedforward-layer parameters: weight '<prefix>_W'
    (nin x nout, float32) and zero bias '<prefix>_b'. Dimensions default
    to options['dim_proj']. Returns the mutated `params` dict."""
    if nin is None:
        nin = options['dim_proj']
    if nout is None:
        nout = options['dim_proj']
    params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
    params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')

    return params
def fflayer(tparams, state_below, options, prefix='rconv',
            activ='lambda x: tensor.tanh(x)', **kwargs):
    # Affine transform + pointwise nonlinearity. `activ` is a *string*
    # of Python source (a lambda, or the name of a module-level function
    # such as 'tanh'/'linear') evaluated with eval(). SECURITY: only
    # ever call this with trusted, hard-coded activation strings.
    return eval(activ)(
        tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
        tparams[_p(prefix, 'b')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """Allocate GRU parameters under `prefix`.

    W/U/b hold the reset and update gates concatenated along axis 1
    (so each is 2*dim wide); Wx/Ux/bx are for the candidate (hidden
    state proposal). Returns the mutated `params` dict."""
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']

    # embedding to gates transformation weights, biases
    W = numpy.concatenate([norm_weight(nin, dim),
                           norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'W')] = W
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')

    # recurrent transformation weights for gates
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix, 'U')] = U

    # embedding to hidden state proposal weights, biases
    Wx = norm_weight(nin, dim)
    params[_p(prefix, 'Wx')] = Wx
    params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')

    # recurrent transformation weights for hidden state proposal
    Ux = ortho_weight(dim)
    params[_p(prefix, 'Ux')] = Ux

    return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None,
              **kwargs):
    """Apply a GRU over `state_below` (time-major: steps x samples x dim,
    or steps x dim for a single sample) with theano.scan.

    Returns a one-element list [hidden_states]. Masked time steps carry
    the previous hidden state through unchanged."""
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1

    dim = tparams[_p(prefix, 'Ux')].shape[1]

    # Default mask: everything is a valid step.
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    # utility function to slice a tensor (gate n out of a concatenated
    # pre-activation block)
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]

    # state_below is the input word embeddings
    # input to the gates, concatenated
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]
    # input to compute the hidden state proposal
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
        tparams[_p(prefix, 'bx')]

    # step function to be used by scan
    # arguments    | sequences |outputs-info| non-seqs
    def _step_slice(m_, x_, xx_, h_, U, Ux):
        preact = tensor.dot(h_, U)
        preact += x_

        # reset and update gates
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))

        # compute the hidden state proposal
        preactx = tensor.dot(h_, Ux)
        preactx = preactx * r
        preactx = preactx + xx_

        # hidden state proposal
        h = tensor.tanh(preactx)

        # leaky integrate and obtain next hidden state
        h = u * h_ + (1. - u) * h
        # masked steps keep the previous state
        h = m_[:, None] * h + (1. - m_)[:, None] * h_

        return h

    # prepare scan arguments
    seqs = [mask, state_below_, state_belowx]
    init_states = [tensor.alloc(0., n_samples, dim)]
    _step = _step_slice
    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Ux')]]

    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info=init_states,
                                non_sequences=shared_vars,
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps,
                                profile=profile,
                                strict=True)
    # wrap in a list so callers can uniformly index proj[0]
    rval = [rval]
    return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
                        nin=None, dim=None, dimctx=None,
                        nin_nonlin=None, dim_nonlin=None):
    """Allocate parameters for the conditional GRU with attention:
    a first GRU cell (W/b/U, Wx/bx/Ux), attention weights
    (Wc_att/b_att, W_comb_att, U_att and its bias), and a second
    GRU cell driven by the attention context (U_nl/b_nl, Ux_nl/bx_nl,
    Wc, Wcx). Returns the mutated `params` dict."""
    if nin is None:
        nin = options['dim']
    if dim is None:
        dim = options['dim']
    if dimctx is None:
        dimctx = options['dim']
    if nin_nonlin is None:
        nin_nonlin = nin
    if dim_nonlin is None:
        dim_nonlin = dim

    # first GRU cell: gates (W, b, U) and candidate (Wx, Ux, bx)
    W = numpy.concatenate([norm_weight(nin, dim),
                           norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'W')] = W
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim_nonlin),
                           ortho_weight(dim_nonlin)], axis=1)
    params[_p(prefix, 'U')] = U

    Wx = norm_weight(nin_nonlin, dim_nonlin)
    params[_p(prefix, 'Wx')] = Wx
    Ux = ortho_weight(dim_nonlin)
    params[_p(prefix, 'Ux')] = Ux
    params[_p(prefix, 'bx')] = numpy.zeros((dim_nonlin,)).astype('float32')

    # second GRU cell (post-attention, "_nl" = nonlinear part)
    U_nl = numpy.concatenate([ortho_weight(dim_nonlin),
                              ortho_weight(dim_nonlin)], axis=1)
    params[_p(prefix, 'U_nl')] = U_nl
    params[_p(prefix, 'b_nl')] = numpy.zeros((2 * dim_nonlin,)).astype('float32')

    Ux_nl = ortho_weight(dim_nonlin)
    params[_p(prefix, 'Ux_nl')] = Ux_nl
    params[_p(prefix, 'bx_nl')] = numpy.zeros((dim_nonlin,)).astype('float32')

    # context to LSTM
    Wc = norm_weight(dimctx, dim*2)
    params[_p(prefix, 'Wc')] = Wc

    Wcx = norm_weight(dimctx, dim)
    params[_p(prefix, 'Wcx')] = Wcx

    # attention: combined -> hidden
    W_comb_att = norm_weight(dim, dimctx)
    params[_p(prefix, 'W_comb_att')] = W_comb_att

    # attention: context -> hidden
    Wc_att = norm_weight(dimctx)
    params[_p(prefix, 'Wc_att')] = Wc_att

    # attention: hidden bias
    b_att = numpy.zeros((dimctx,)).astype('float32')
    params[_p(prefix, 'b_att')] = b_att

    # attention:
    U_att = norm_weight(dimctx, 1)
    params[_p(prefix, 'U_att')] = U_att
    c_att = numpy.zeros((1,)).astype('float32')
    # NOTE(review): the key is 'c_tt' (not 'c_att') -- inconsistent
    # naming, but gru_cond_layer and saved model archives both use
    # 'c_tt', so it must not be renamed.
    params[_p(prefix, 'c_tt')] = c_att

    return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
                   mask=None, context=None, one_step=False,
                   init_memory=None, init_state=None,
                   context_mask=None,
                   **kwargs):
    """Conditional GRU with attention (decoder layer).

    Runs GRU1 on the target embeddings, attends over `context`
    (#annotation x #sample x dimctx) with the GRU1 state as query, then
    runs GRU2 on the resulting context vector. With one_step=True a
    single step is computed (for sampling) and init_state is required;
    otherwise theano.scan unrolls over state_below.

    Returns (hidden_states, context_vectors, attention_weights).
    `init_memory` is accepted for interface compatibility but unused.
    """

    assert context, 'Context must be provided'

    if one_step:
        assert init_state, 'previous state must be provided'

    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1

    # mask
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    dim = tparams[_p(prefix, 'Wcx')].shape[1]

    # initial/previous state
    if init_state is None:
        init_state = tensor.alloc(0., n_samples, dim)

    # projected context (computed once, shared across all steps)
    assert context.ndim == 3, \
        'Context must be 3-d: #annotation x #sample x dim'
    pctx_ = tensor.dot(context, tparams[_p(prefix, 'Wc_att')]) +\
        tparams[_p(prefix, 'b_att')]

    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]

    # projected x
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) +\
        tparams[_p(prefix, 'bx')]
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) +\
        tparams[_p(prefix, 'b')]

    # scan step: m_/x_/xx_ are sequences; h_/ctx_/alpha_ are the
    # recurrent outputs (ctx_/alpha_ are recomputed, not read); the
    # rest are non-sequences.
    def _step_slice(m_, x_, xx_, h_, ctx_, alpha_, pctx_, cc_,
                    U, Wc, W_comb_att, U_att, c_tt, Ux, Wcx,
                    U_nl, Ux_nl, b_nl, bx_nl):
        # --- GRU1 on the previous state and target embedding ---
        preact1 = tensor.dot(h_, U)
        preact1 += x_
        preact1 = tensor.nnet.sigmoid(preact1)

        r1 = _slice(preact1, 0, dim)
        u1 = _slice(preact1, 1, dim)

        preactx1 = tensor.dot(h_, Ux)
        preactx1 *= r1
        preactx1 += xx_

        h1 = tensor.tanh(preactx1)

        h1 = u1 * h_ + (1. - u1) * h1
        h1 = m_[:, None] * h1 + (1. - m_)[:, None] * h_

        # attention: softmax over annotations, queried by h1
        pstate_ = tensor.dot(h1, W_comb_att)
        pctx__ = pctx_ + pstate_[None, :, :]
        #pctx__ += xc_
        pctx__ = tensor.tanh(pctx__)
        alpha = tensor.dot(pctx__, U_att)+c_tt
        alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
        alpha = tensor.exp(alpha)
        if context_mask:
            alpha = alpha * context_mask
        alpha = alpha / alpha.sum(0, keepdims=True)
        ctx_ = (cc_ * alpha[:, :, None]).sum(0)  # current context

        # --- GRU2 driven by the attention context ---
        preact2 = tensor.dot(h1, U_nl)+b_nl
        preact2 += tensor.dot(ctx_, Wc)
        preact2 = tensor.nnet.sigmoid(preact2)

        r2 = _slice(preact2, 0, dim)
        u2 = _slice(preact2, 1, dim)

        preactx2 = tensor.dot(h1, Ux_nl)+bx_nl
        preactx2 *= r2
        preactx2 += tensor.dot(ctx_, Wcx)

        h2 = tensor.tanh(preactx2)

        h2 = u2 * h1 + (1. - u2) * h2
        h2 = m_[:, None] * h2 + (1. - m_)[:, None] * h1

        return h2, ctx_, alpha.T  # pstate_, preact, preactx, r, u

    seqs = [mask, state_below_, state_belowx]
    #seqs = [mask, state_below_, state_belowx, state_belowc]
    _step = _step_slice

    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Wc')],
                   tparams[_p(prefix, 'W_comb_att')],
                   tparams[_p(prefix, 'U_att')],
                   tparams[_p(prefix, 'c_tt')],
                   tparams[_p(prefix, 'Ux')],
                   tparams[_p(prefix, 'Wcx')],
                   tparams[_p(prefix, 'U_nl')],
                   tparams[_p(prefix, 'Ux_nl')],
                   tparams[_p(prefix, 'b_nl')],
                   tparams[_p(prefix, 'bx_nl')]]

    if one_step:
        # ctx_/alpha_ slots are never read by _step, so None is safe here
        rval = _step(*(seqs + [init_state, None, None, pctx_, context] +
                       shared_vars))
    else:
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    outputs_info=[init_state,
                                                  tensor.alloc(0., n_samples,
                                                               context.shape[2]),
                                                  tensor.alloc(0., n_samples,
                                                               context.shape[0])],
                                    non_sequences=[pctx_, context]+shared_vars,
                                    name=_p(prefix, '_layers'),
                                    n_steps=nsteps,
                                    profile=profile,
                                    strict=True)
    return rval
# initialize all parameters
def init_params(options):
    """Initialize all model parameters as numpy arrays in an OrderedDict:
    source/target embeddings, bidirectional encoder, decoder-state
    initializer, attention decoder, and the four readout layers."""
    params = OrderedDict()

    # embedding
    params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
    params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])

    # encoder: bidirectional RNN (forward + reversed copies)
    params = get_layer(options['encoder'])[0](options, params,
                                              prefix='encoder',
                                              nin=options['dim_word'],
                                              dim=options['dim'])
    params = get_layer(options['encoder'])[0](options, params,
                                              prefix='encoder_r',
                                              nin=options['dim_word'],
                                              dim=options['dim'])
    # context is forward+backward states concatenated
    ctxdim = 2 * options['dim']

    # init_state, init_cell
    params = get_layer('ff')[0](options, params, prefix='ff_state',
                                nin=ctxdim, nout=options['dim'])
    # decoder
    params = get_layer(options['decoder'])[0](options, params,
                                              prefix='decoder',
                                              nin=options['dim_word'],
                                              dim=options['dim'],
                                              dimctx=ctxdim)
    # readout: combines decoder state, previous word, and context
    params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
                                nin=options['dim'], nout=options['dim_word'],
                                ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
                                nin=options['dim_word'],
                                nout=options['dim_word'], ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
                                nin=ctxdim, nout=options['dim_word'],
                                ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit',
                                nin=options['dim_word'],
                                nout=options['n_words'])

    return params
# build a training model
def build_model(tparams, options):
    """Build the training computation graph.

    Returns (trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost):
    `cost` is the per-sentence negative log-likelihood (length
    n_samples) and opt_ret['dec_alphas'] holds the decoder attention
    weights (alignment matrix)."""
    opt_ret = dict()

    trng = RandomStreams(1234)
    use_noise = theano.shared(numpy.float32(0.))

    # description string: #words x #samples
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    y = tensor.matrix('y', dtype='int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')

    # for the backward rnn, we just need to invert x and x_mask
    xr = x[::-1]
    xr_mask = x_mask[::-1]

    n_timesteps = x.shape[0]
    n_timesteps_trg = y.shape[0]
    n_samples = x.shape[1]

    # word embedding for forward rnn (source)
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder',
                                            mask=x_mask)
    # word embedding for backward rnn (source)
    embr = tparams['Wemb'][xr.flatten()]
    embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
    projr = get_layer(options['encoder'])[1](tparams, embr, options,
                                             prefix='encoder_r',
                                             mask=xr_mask)

    # context will be the concatenation of forward and backward rnns
    ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)

    # mean of the context (across time) will be used to initialize decoder rnn
    ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]

    # or you can use the last state of forward + backward encoder rnns
    # ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)

    # initial decoder state
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    # word embedding (target), we will shift the target sequence one time step
    # to the right. This is done because of the bi-gram connections in the
    # readout and decoder rnn. The first target will be all zeros and we will
    # not condition on the last output.
    emb = tparams['Wemb_dec'][y.flatten()]
    emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
    emb_shifted = tensor.zeros_like(emb)
    emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
    emb = emb_shifted

    # decoder - pass through the decoder conditional gru with attention
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=y_mask, context=ctx,
                                            context_mask=x_mask,
                                            one_step=False,
                                            init_state=init_state)
    # hidden states of the decoder gru
    proj_h = proj[0]
    # weighted averages of context, generated by attention module
    ctxs = proj[1]
    # weights (alignment matrix)
    opt_ret['dec_alphas'] = proj[2]

    # compute word probabilities
    logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    # flatten (time, sample) so softmax normalizes over the vocabulary
    probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1],
                                               logit_shp[2]]))

    # cost: pick out the probability of each gold word by flat index
    y_flat = y.flatten()
    y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
    cost = -tensor.log(probs.flatten()[y_flat_idx])
    cost = cost.reshape([y.shape[0], y.shape[1]])
    cost = (cost * y_mask).sum(0)

    return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng, use_noise):
    """Compile the two sampling functions used by gen_sample:
    f_init(x) -> [init_state, ctx] encodes the source sentence;
    f_next(y, ctx, init_state) -> [next_probs, next_sample, next_state]
    advances the decoder by one word. (Python 2 print statements.)"""
    x = tensor.matrix('x', dtype='int64')
    xr = x[::-1]
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]

    # word embedding (source), forward and backward
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    embr = tparams['Wemb'][xr.flatten()]
    embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder')
    projr = get_layer(options['encoder'])[1](tparams, embr, options,
                                             prefix='encoder_r')

    # concatenate forward and backward rnn hidden states
    ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim-1)

    # get the input for decoder rnn initializer mlp
    # (plain mean here -- no mask, sampling uses a single unpadded input)
    ctx_mean = ctx.mean(0)
    # ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    print 'Building f_init...',
    outs = [init_state, ctx]
    f_init = theano.function([x], outs, name='f_init', profile=profile)
    print 'Done'

    # x: 1 x 1
    y = tensor.vector('y_sampler', dtype='int64')
    init_state = tensor.matrix('init_state', dtype='float32')

    # if it's the first word, emb should be all zero and it is indicated by -1
    emb = tensor.switch(y[:, None] < 0,
                        tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
                        tparams['Wemb_dec'][y])

    # apply one step of conditional gru with attention
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=None, context=ctx,
                                            one_step=True,
                                            init_state=init_state)
    # get the next hidden state
    next_state = proj[0]

    # get the weighted averages of context for this target word y
    ctxs = proj[1]

    # readout: same stack as in build_model
    logit_lstm = get_layer('ff')[1](tparams, next_state, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')

    # compute the softmax probability
    next_probs = tensor.nnet.softmax(logit)

    # sample from softmax distribution to get the sample
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)

    # compile a function to do the whole thing above, next word probability,
    # sampled word for the next target, next hidden state to be used
    print 'Building f_next..',
    inps = [y, ctx, init_state]
    outs = [next_probs, next_sample, next_state]
    f_next = theano.function(inps, outs, name='f_next', profile=profile)
    print 'Done'

    return f_init, f_next
# generate sample, either with stochastic sampling or beam search. Note that,
# this function iteratively calls f_init and f_next functions.
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
               stochastic=True, argmax=False):
    """Generate a translation for source `x` (a single sentence).

    With stochastic=True: sample (or argmax) one word at a time; returns
    (word_list, float_score). With stochastic=False: beam search of
    width k; returns (list_of_hypotheses, list_of_scores). Word id 0 is
    treated as end-of-sentence.
    """

    # k is the beam size we have
    if k > 1:
        assert not stochastic, \
            'Beam search does not support stochastic sampling'

    sample = []
    sample_score = []
    if stochastic:
        sample_score = 0

    live_k = 1
    dead_k = 0

    hyp_samples = [[]] * live_k
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []

    # get initial state of decoder rnn and encoder context
    ret = f_init(x)
    next_state, ctx0 = ret[0], ret[1]
    next_w = -1 * numpy.ones((1,)).astype('int64')  # bos indicator

    for ii in xrange(maxlen):
        # replicate the context for each live hypothesis
        ctx = numpy.tile(ctx0, [live_k, 1])
        inps = [next_w, ctx, next_state]
        ret = f_next(*inps)
        next_p, next_w, next_state = ret[0], ret[1], ret[2]

        if stochastic:
            if argmax:
                nw = next_p[0].argmax()
            else:
                nw = next_w[0]
            sample.append(nw)
            sample_score -= numpy.log(next_p[0, nw])
            if nw == 0:
                break
        else:
            # beam search: expand every live hypothesis by the whole
            # vocabulary and keep the (k - dead_k) best continuations
            cand_scores = hyp_scores[:, None] - numpy.log(next_p)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k-dead_k)]

            voc_size = next_p.shape[1]
            # NOTE(review): relies on Python 2 integer division; under
            # Python 3 this would need // to stay an integer index.
            trans_indices = ranks_flat / voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]

            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
            new_hyp_states = []

            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti]+[wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(next_state[ti]))

            # check the finished samples (ending in EOS = word id 0)
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []

            for idx in xrange(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == 0:
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k

            if new_live_k < 1:
                break
            if dead_k >= k:
                break

            # feed the last word and state of each live hypothesis back in
            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = numpy.array(hyp_states)

    if not stochastic:
        # dump every remaining one (hit maxlen without emitting EOS)
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])

    return sample, sample_score
# calculate the log probablities on a given corpus using translation model
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
    """Per-sentence negative log-likelihoods over a whole corpus.

    Returns a 1-d numpy array with one cost per sentence. Drops into
    ipdb if a NaN shows up. (Python 2 print syntax.)"""
    probs = []

    n_done = 0

    for x, y in iterator:
        n_done += len(x)

        x, x_mask, y, y_mask = prepare_data(x, y,
                                            n_words_src=options['n_words_src'],
                                            n_words=options['n_words'])

        pprobs = f_log_probs(x, x_mask, y, y_mask)
        for pp in pprobs:
            probs.append(pp)

        # debugging hook: stop immediately when the cost diverges
        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)

    return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost):
    """Adam optimizer, split into two compiled functions:
    f_grad_shared(inp...) computes the cost and stores gradients in
    shared variables; f_update(lr) applies the Adam step.

    NOTE(review): b1/b2 here follow the early-Adam convention where
    b1 plays the role of (1 - beta1) etc., and the passed-in `lr` is
    ignored in favour of the hard-coded lr0 -- presumably intentional,
    but confirm before reusing."""
    gshared = [theano.shared(p.get_value() * 0.,
                             name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]

    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)

    lr0 = 0.0002
    b1 = 0.1
    b2 = 0.001
    e = 1e-8

    updates = []

    # shared step counter and bias-correction factors
    i = theano.shared(numpy.float32(0.))
    i_t = i + 1.
    fix1 = 1. - b1**(i_t)
    fix2 = 1. - b2**(i_t)
    lr_t = lr0 * (tensor.sqrt(fix2) / fix1)

    for p, g in zip(tparams.values(), gshared):
        # first and second moment estimates, one pair per parameter
        m = theano.shared(p.get_value() * 0.)
        v = theano.shared(p.get_value() * 0.)
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (tensor.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # the step counter is updated once per f_update call
    updates.append((i, i_t))

    f_update = theano.function([lr], [], updates=updates,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
    """Adadelta optimizer (Zeiler 2012) with decay 0.95 and eps 1e-6.

    Returns (f_grad_shared, f_update); the `lr` argument of f_update is
    unused (Adadelta is learning-rate free) but kept for a uniform
    optimizer interface."""
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    # store gradients and accumulate their running squared magnitude
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up,
                                    profile=profile)

    # update direction scaled by the ratio of RMS[dx] to RMS[g]
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    f_update = theano.function([lr], [], updates=ru2up+param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
    """RMSProp with momentum (Graves 2013 variant: centered second
    moment, momentum 0.9, base step 1e-4).

    Returns (f_grad_shared, f_update); the `lr` argument of f_update is
    unused (step size is hard-coded) but kept for a uniform interface."""
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                   name='%s_rgrad' % k)
                     for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    # store gradients plus running mean and running mean-square
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up,
                                    profile=profile)

    # momentum buffer per parameter; rg2 - rg**2 is the running variance
    updir = [theano.shared(p.get_value() * numpy.float32(0.),
                           name='%s_updir' % k)
             for k, p in tparams.iteritems()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
                                            running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(itemlist(tparams), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new+param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def sgd(lr, tparams, grads, x, mask, y, cost):
    """Plain stochastic gradient descent.

    NOTE(review): unlike the other optimizers this takes (x, mask, y)
    instead of a generic input list `inp` -- callers selecting the
    optimizer by name must account for the different signature."""
    gshared = [theano.shared(p.get_value() * 0.,
                             name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]

    # compute cost and stash gradients in shared variables
    f_grad_shared = theano.function([x, mask, y], cost, updates=gsup,
                                    profile=profile)

    # p <- p - lr * g using the stashed gradients
    pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
    f_update = theano.function([lr], [], updates=pup, profile=profile)

    return f_grad_shared, f_update
def train(dim_word=100,  # word vector dimensionality
          dim=1000,  # the number of LSTM units
          encoder='gru',
          decoder='gru_cond',
          patience=10,  # early stopping patience
          max_epochs=5000,
          finish_after=10000000,  # finish after this many updates
          dispFreq=100,
          decay_c=0.,  # L2 regularization penalty
          alpha_c=0.,  # alignment regularization
          clip_c=-1.,  # gradient clipping threshold
          lrate=0.01,  # learning rate
          n_words_src=100000,  # source vocabulary size
          n_words=100000,  # target vocabulary size
          maxlen=100,  # maximum length of the description
          optimizer='rmsprop',
          batch_size=16,
          valid_batch_size=16,
          saveto='model.npz',
          validFreq=1000,
          saveFreq=1000,  # save the parameters after every saveFreq updates
          sampleFreq=100,  # generate some samples after every sampleFreq
          datasets=[
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
          valid_datasets=['../data/dev/newstest2011.en.tok',
                          '../data/dev/newstest2011.fr.tok'],
          dictionaries=[
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
          use_dropout=False,
          reload_=False,
          overwrite=False):
    """Train an encoder-decoder translation model.

    Runs the full optimization loop: data loading, model construction,
    regularization, gradient clipping, periodic checkpointing, sampling,
    and validation with patience-based early stopping.

    Returns the mean of ``pred_probs`` over the validation set, or
    ``1., 1., 1.`` immediately if the training cost becomes NaN/inf.

    NOTE(review): helpers used below (TextIterator, init_params,
    init_tparams, load_params, build_model, build_sampler, gen_sample,
    pred_probs, prepare_data, unzip, zipp, itemlist, the ``profile`` flag
    and the modules pkl/ipdb/copy/time) are defined or imported earlier in
    this module, outside this excerpt.
    """
    # Model options
    # locals() here contains exactly the keyword arguments above.
    model_options = locals().copy()

    # load dictionaries and invert them
    worddicts = [None] * len(dictionaries)
    worddicts_r = [None] * len(dictionaries)
    for ii, dd in enumerate(dictionaries):
        with open(dd, 'rb') as f:
            worddicts[ii] = pkl.load(f)
        worddicts_r[ii] = dict()
        for kk, vv in worddicts[ii].iteritems():
            worddicts_r[ii][vv] = kk

    # reload options
    if reload_ and os.path.exists(saveto):
        print 'Reloading model options'
        with open('%s.pkl' % saveto, 'rb') as f:
            model_options = pkl.load(f)

    print 'Loading data'
    train = TextIterator(datasets[0], datasets[1],
                         dictionaries[0], dictionaries[1],
                         n_words_source=n_words_src, n_words_target=n_words,
                         batch_size=batch_size,
                         maxlen=maxlen)
    valid = TextIterator(valid_datasets[0], valid_datasets[1],
                         dictionaries[0], dictionaries[1],
                         n_words_source=n_words_src, n_words_target=n_words,
                         batch_size=valid_batch_size,
                         maxlen=maxlen)

    print 'Building model'
    params = init_params(model_options)
    # reload parameters
    if reload_ and os.path.exists(saveto):
        print 'Reloading model parameters'
        params = load_params(saveto, params)

    tparams = init_tparams(params)

    trng, use_noise, \
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost = \
        build_model(tparams, model_options)
    inps = [x, x_mask, y, y_mask]

    print 'Building sampler'
    f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)

    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile)
    print 'Done'

    cost = cost.mean()

    # apply L2 regularization on weights
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay

    # regularize the alpha weights
    if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * (
            (tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:, None] -
             opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
        cost += alpha_reg

    # after all regularizers - compile the computational graph for cost
    print 'Building f_cost...',
    f_cost = theano.function(inps, cost, profile=profile)
    print 'Done'

    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(tparams))
    print 'Done'

    # apply gradient clipping here
    # (rescales the whole gradient when its global norm exceeds clip_c)
    if clip_c > 0.:
        g2 = 0.
        for g in grads:
            g2 += (g**2).sum()
        new_grads = []
        for g in grads:
            new_grads.append(tensor.switch(g2 > (clip_c**2),
                                           g / tensor.sqrt(g2) * clip_c,
                                           g))
        grads = new_grads

    # compile the optimizer, the actual computational graph is compiled here
    # NOTE(review): eval() trusts the `optimizer` string -- only pass the
    # names of the optimizer functions defined in this module.
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',
    f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
    print 'Done'

    print 'Optimization'

    best_p = None
    bad_counter = 0
    uidx = 0
    estop = False
    history_errs = []
    # reload history
    if reload_ and os.path.exists(saveto):
        rmodel = numpy.load(saveto)
        history_errs = list(rmodel['history_errs'])
        if 'uidx' in rmodel:
            uidx = rmodel['uidx']

    # -1 means "once per (approximate) epoch".
    # NOTE(review): len(train[0]) assumes the iterator supports indexing --
    # confirm against the TextIterator implementation.
    if validFreq == -1:
        validFreq = len(train[0])/batch_size
    if saveFreq == -1:
        saveFreq = len(train[0])/batch_size
    if sampleFreq == -1:
        sampleFreq = len(train[0])/batch_size

    for eidx in xrange(max_epochs):
        n_samples = 0

        for x, y in train:
            n_samples += len(x)
            uidx += 1
            use_noise.set_value(1.)

            x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
                                                n_words_src=n_words_src,
                                                n_words=n_words)

            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                uidx -= 1
                continue

            ud_start = time.time()

            # compute cost, grads and copy grads to shared variables
            cost = f_grad_shared(x, x_mask, y, y_mask)

            # do the update on parameters
            f_update(lrate)

            ud = time.time() - ud_start

            # check for bad numbers, usually we remove non-finite elements
            # and continue training - but not done here
            if numpy.isnan(cost) or numpy.isinf(cost):
                print 'NaN detected'
                return 1., 1., 1.

            # verbose
            if numpy.mod(uidx, dispFreq) == 0:
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud

            # save the best model so far, in addition, save the latest model
            # into a separate file with the iteration number for external eval
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving the best model...',
                if best_p is not None:
                    params = best_p
                else:
                    params = unzip(tparams)
                numpy.savez(saveto, history_errs=history_errs, uidx=uidx, **params)
                pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
                print 'Done'

                # save with uidx
                if not overwrite:
                    print 'Saving the model at iteration {}...'.format(uidx),
                    saveto_uidx = '{}.iter{}.npz'.format(
                        os.path.splitext(saveto)[0], uidx)
                    numpy.savez(saveto_uidx, history_errs=history_errs,
                                uidx=uidx, **unzip(tparams))
                    print 'Done'

            # generate some samples with the model and display them
            if numpy.mod(uidx, sampleFreq) == 0:
                # FIXME: random selection?
                for jj in xrange(numpy.minimum(5, x.shape[1])):
                    stochastic = True
                    sample, score = gen_sample(tparams, f_init, f_next,
                                               x[:, jj][:, None],
                                               model_options, trng=trng, k=1,
                                               maxlen=30,
                                               stochastic=stochastic,
                                               argmax=False)
                    # index 0 terminates a sentence; UNK for out-of-vocab ids
                    print 'Source ', jj, ': ',
                    for vv in x[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[0]:
                            print worddicts_r[0][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Truth ', jj, ' : ',
                    for vv in y[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Sample ', jj, ': ',
                    if stochastic:
                        ss = sample
                    else:
                        # length-normalise beam scores, keep the best sample
                        score = score / numpy.array([len(s) for s in sample])
                        ss = sample[score.argmin()]
                    for vv in ss:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print

            # validate model on validation set and early stop if necessary
            if numpy.mod(uidx, validFreq) == 0:
                use_noise.set_value(0.)
                valid_errs = pred_probs(f_log_probs, prepare_data,
                                        model_options, valid)
                valid_err = valid_errs.mean()
                history_errs.append(valid_err)

                if uidx == 0 or valid_err <= numpy.array(history_errs).min():
                    best_p = unzip(tparams)
                    bad_counter = 0
                # patience-based early stopping: count validations that are
                # worse than the best seen `patience` validations ago
                if len(history_errs) > patience and valid_err >= \
                        numpy.array(history_errs)[:-patience].min():
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break

                if numpy.isnan(valid_err):
                    ipdb.set_trace()

                print 'Valid ', valid_err

            # finish after this many updates
            if uidx >= finish_after:
                print 'Finishing after %d iterations!' % uidx
                estop = True
                break

        print 'Seen %d samples' % n_samples

        if estop:
            break

    # restore the best parameters seen during validation before the final
    # evaluation and save
    if best_p is not None:
        zipp(best_p, tparams)

    use_noise.set_value(0.)
    valid_err = pred_probs(f_log_probs, prepare_data,
                           model_options, valid).mean()

    print 'Valid ', valid_err

    params = copy.copy(best_p)
    numpy.savez(saveto, zipped_params=best_p,
                history_errs=history_errs,
                uidx=uidx,
                **params)

    return valid_err
# Entry-point guard: running this module directly is a no-op; training is
# launched from a separate driver script that calls train() with options.
if __name__ == '__main__':
    pass
| {
"content_hash": "c9461e4b109f424d132dda9509208df4",
"timestamp": "",
"source": "github",
"line_count": 1302,
"max_line_length": 83,
"avg_line_length": 35.704301075268816,
"alnum_prop": 0.5187041538494633,
"repo_name": "vineetm/dl4mt-material",
"id": "e1e9068d6854199e0ffbf00f755fbe14542e4703",
"size": "46487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "session3/nmt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "17034"
},
{
"name": "JavaScript",
"bytes": "835"
},
{
"name": "NewLisp",
"bytes": "1582"
},
{
"name": "Perl",
"bytes": "27041"
},
{
"name": "Python",
"bytes": "283730"
},
{
"name": "Ruby",
"bytes": "1649"
},
{
"name": "Shell",
"bytes": "32788"
},
{
"name": "Slash",
"bytes": "356"
},
{
"name": "Smalltalk",
"bytes": "1892"
},
{
"name": "SystemVerilog",
"bytes": "184"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
import psutil
import time
import threading
import os
class Monitor(object):
    """Base class for process resource monitors.

    Collects usage samples in ``measured_values`` and maintains running
    min/max/average statistics over them.

    NOTE(review): ``self.process_id`` (read in ``__init__``) and the
    ``frequency``/``length`` attributes used by subclasses are not defined
    anywhere in this module -- presumably they are injected externally
    (e.g. as class attributes by the launching code). TODO confirm.
    """

    def __init__(self):
        # Running statistics over the collected samples.
        self.min_usage = 0.0
        self.max_usage = 0.0
        self.avg_usage = 0.0
        self.measured_values = []
        # Resolve the monitored process once; cache its name for reporting.
        self.process = psutil.Process(self.process_id)
        self.process_name = self.process.name()

    @staticmethod
    def factory(type):
        """Create a concrete monitor from its class name.

        Raises ValueError for unknown names. (Previously this used a bare
        ``assert 0``, which is stripped under ``python -O`` and would then
        silently return None.)
        """
        if type == 'CpuMonitor':
            return CpuMonitor()
        elif type == 'MemoryMonitor':
            return MemoryMonitor()
        raise ValueError("Bad monitor creation: " + type)

    @abstractmethod
    def start(self):
        """Start the sampling thread."""
        pass

    @abstractmethod
    def measure_usage(self):
        """Collect usage samples in a loop."""
        pass

    @abstractmethod
    def print_values(self):
        """Print the current min/max/average statistics."""
        pass

    def get_max_usage(self):
        """Return the largest usage seen: current samples vs previous max."""
        peak = max(self.measured_values)
        # max_usage == 0 means "no previous maximum recorded yet".
        if peak > self.max_usage or self.max_usage == 0:
            return peak
        return self.max_usage

    def get_min_usage(self):
        """Return the smallest non-zero usage seen so far.

        min_usage == 0.0 means "no previous minimum recorded yet"; zero
        samples are ignored so an idle reading never becomes the minimum.
        """
        min_measured_value = min(self.measured_values)
        if ((min_measured_value < self.min_usage)
                or (self.min_usage == 0.0)) \
                and min_measured_value != 0.0:
            return min_measured_value
        return self.min_usage

    def print_process_name(self):
        """Print a banner with the monitored process name."""
        print("-------------------------")
        print("Process name: {}".format(self.process_name))
        print("-------------------------")

    def print_date(self):
        """Print a banner with the current local date/time."""
        print("\n-------------------------")
        print("{}".format(time.strftime('%c')))
        print("-------------------------")

    def set_values(self):
        """Recompute avg/min/max statistics from the collected samples."""
        self.avg_usage = (sum(self.measured_values) /
                          len(self.measured_values))
        self.min_usage = self.get_min_usage()
        self.max_usage = self.get_max_usage()

    def length_reached(self):
        """Fold the completed sample window into the stats and report them."""
        self.set_values()
        self.print_values()
        # Start the next window seeded with the running average so the
        # long-term statistics are not lost.
        del self.measured_values[:]
        self.measured_values.append(self.avg_usage)
class CpuMonitor(Monitor):
    """Monitor that samples the CPU usage of the watched process."""

    def __init__(self):
        super(CpuMonitor, self).__init__()

    def start(self):
        """Begin sampling in a background thread."""
        threading.Thread(target=self.measure_usage).start()

    def print_values(self):
        """Print min/max/average CPU usage, normalised per logical core."""
        self.print_date()
        print("        Total CPU")
        print("-------------------------")
        print("minimum usage: {0:.2f} %".format(
            self.min_usage / psutil.cpu_count()))
        print("maximum usage: {0:.2f} %".format(
            self.max_usage / psutil.cpu_count()))
        # Fix: "avarage" -> "average" in the printed label.
        print("average usage: {0:.2f} %".format(
            self.avg_usage / psutil.cpu_count()))
        print("-------------------------")

    def set_measured_values(self):
        """Collect ``length`` CPU-percent samples, each blocking for
        ``frequency`` seconds.

        NOTE(review): ``self.frequency`` and ``self.length`` are not set in
        this module -- presumably injected externally. TODO confirm.
        """
        self.measured_values = \
            [self.process.cpu_percent(interval=self.frequency)
             for _ in range(self.length)]

    def measure_usage(self):
        """Sample forever; exit the whole program when the process dies."""
        try:
            while True:
                self.set_measured_values()
                self.length_reached()
        except psutil.NoSuchProcess:
            print("Process {} stopped".format(self.process_name))
            # Hard-exit so the daemonised sampling thread cannot linger.
            os._exit(0)
class MemoryMonitor(Monitor):
    """Monitor that samples resident memory (rss) of the watched process."""

    def __init__(self):
        super(MemoryMonitor, self).__init__()

    @staticmethod
    def byte_to_mb(value):
        """Convert a byte count to (decimal) megabytes."""
        return value / 1000000

    def print_values(self):
        """Print min/max/average memory usage in MB."""
        self.print_date()
        print("         Memory")
        print("-------------------------")
        print("minimum usage: {} MB".format(self.byte_to_mb(self.min_usage)))
        print("maximum usage: {} MB".format(self.byte_to_mb(self.max_usage)))
        # Fix: "avarage" -> "average" in the printed label.
        print("average usage: {} MB".format(self.byte_to_mb(self.avg_usage)))
        print("-------------------------")

    def start(self):
        """Begin sampling in a background thread."""
        threading.Thread(target=self.measure_usage).start()

    def is_time_expired(self, start_time):
        """Return True when at least ``frequency`` seconds have elapsed."""
        return time.time() - start_time >= self.frequency

    def measure_usage(self):
        """Poll memory_info()[0] (rss) every ``frequency`` seconds; exit the
        whole program when the process dies."""
        try:
            self.measured_values.append(self.process.memory_info()[0])
            start_time = time.time()
            while True:
                if self.is_time_expired(start_time):
                    self.measured_values.append(self.process.memory_info()[0])
                    start_time = time.time()
                if len(self.measured_values) > self.length:
                    self.length_reached()
                # Short sleep keeps the polling loop from spinning the CPU.
                time.sleep(.001)
        except psutil.NoSuchProcess:
            print("Process {} stopped".format(self.process_name))
            os._exit(0)
| {
"content_hash": "9fd73489328057db048c5ede1c5fe876",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 94,
"avg_line_length": 30.527027027027028,
"alnum_prop": 0.5334218680832227,
"repo_name": "docoprusta/process-monitor",
"id": "02d13302b2ec169e1cfcb063b746c15a45999045",
"size": "4518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6076"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
from zope.interface import Interface, implements
class ITimeseries(Interface):
    """Represent a single time series -- one stream of scalars
    """
    def add(self, *args):
        """Add a new reading to this timeseries.

        This version is thread-safe -- can be called from any thread.
        """
    def _add(self, *args):
        """Add a new reading to this timeseries.

        This version should only be called from the twisted main loop.
        """
    def render(self):
        # Rendering contract left unspecified by the original interface.
        pass
class ITimeseriesFactory(Interface):
    """Factory interface: a callable that creates Timeseries objects."""
    def __call__(unit,
                 data_type="integer",
                 timezone="America/Los_Angeles",
                 key=None,
                 new_uuid=None,
                 buffersz=1):
        """Create a Timeseries.

        We set the default data type to Integer and timezone to the
        West Coast, so the only *required* information is the unit of
        measure for this stream.

        If both key and uuid are None, there is an exception because
        one of these is needed to assign the timeseries a UUID.
        Otherwise, "uuid" is used first, followed by a uuid generated
        algorithm 5 based on key + smap.root_uuid.

        @buffersz how many readings are stored in the Reading resource
        (for GET requests, reporting is handled separately).
        """
        # Fix: removed a leftover debug statement (print "__CALL__") --
        # an interface declaration should not execute anything, and the
        # Python 2 print statement is a syntax error under Python 3.
class ICollection(Interface):
    """Represent a collection of resources, with attached metadata
    """
    def add_child(self, child):
        """Add a child to the collection"""
    def render(self):
        # Rendering contract left unspecified by the original interface.
        pass
class IActuator(Interface):
    """Interface for actuators: writable points whose state can be set and
    read back from the underlying device."""
    def setup(self, opts):
        """Set up an actuator"""
    def parse_state(self, state):
        """Parse a state from a string into a state representation"""
    def set_state(self, state):
        """Set the state of the actuator.  The value passed in as the
        new state will be the result of calling parse_state"""
    def get_state(self, state):
        # NOTE(review): declares a `state` argument even though the
        # docstring describes a read -- presumably unused by implementors;
        # confirm against implementations.
        """Read the current state of the actuator.  This should query
        the device for the state, rather than using a cached value."""
class ISmapInstance(Interface):
    """Represents a sMAP instance -- a whole tree of Collections and Timeseries.
    """
    def lookup(self, id, pred=None):
        """Look up a sMAP point relative by path or uuid.

        Thread-safe version
        """
    def get_timeseries(self, id):
        """Get a timeseries by identifier"""
    def get_collection(self, id):
        """Get a collection by identifier"""
    def add_timeseries(self, path, *args, **kwargs):
        """Attach a timeseries at `path` (contract unspecified here)."""
        pass
    def add_collection(self, path, collection):
        """Attach `collection` at `path` (contract unspecified here)."""
        pass
class IReporting(Interface):
    """Represents a reporting object, which is responsible for sending
    data to interested consumers.
    """
    def add_report(self, rpt):
        """Install a report specification (contract unspecified here)."""
        pass
    def get_report(self, rpt):
        """Retrieve a report specification (contract unspecified here)."""
        pass
    def update_report(self, rpt):
        """Update an existing report (contract unspecified here)."""
        pass
    def update_subscriptions(self):
        """Refresh subscription state (contract unspecified here)."""
        pass
    def publish(self):
        """Publish pending data (contract unspecified here)."""
        pass
    def _flush(self, force=False):
        """Try to send out all data which is able to be sent currently

        @force ignore MinPeriod and MaxPeriod requests in the reporting instance
        @return a deferred which will fire when all the data has
         delivered.  Any errors in delivery will cause the errBack
         to fire instead.
        """
    def flush(self):
        """Flush pending data (contract unspecified here)."""
        pass
class ISmapDriver(Interface):
    """Represent a sMAP driver -- something which attaches itself at
    some point in the tree and then may produce readings
    """
    def setup(self, instance, opts={}):
        """The driver should register its points with the relative
        sMAP instance in this call.

        @instance an object implementing ISmapInstance
        @opts dict of other options to be passed to the driver, ie from a config file
        """
    def start(self):
        """Called when the instance should start producing readings.
        """
| {
"content_hash": "be93ae76b3dc7c24bdd4f2a54b7e142b",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 85,
"avg_line_length": 33.21341463414634,
"alnum_prop": 0.6649531852395815,
"repo_name": "jf87/smap",
"id": "b9c083ab503d08947decdb2d4bb161dc75e2d5ed",
"size": "5447",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "python/smap/interface.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "325117"
},
{
"name": "HTML",
"bytes": "9642"
},
{
"name": "Java",
"bytes": "47918"
},
{
"name": "Lua",
"bytes": "9058"
},
{
"name": "Makefile",
"bytes": "5715"
},
{
"name": "Python",
"bytes": "1704999"
},
{
"name": "R",
"bytes": "23461"
},
{
"name": "Shell",
"bytes": "1273"
},
{
"name": "TeX",
"bytes": "40212"
},
{
"name": "XSLT",
"bytes": "5081"
}
],
"symlink_target": ""
} |
from django.dispatch import receiver
from crudbuilder.signals import post_create_signal, post_update_signal
from crudbuilder.tests.models import TestModel
@receiver(post_create_signal, sender=TestModel)
def post_create_signal_handler(sender, **kwargs):
    """No-op receiver for crudbuilder's ``post_create_signal`` on TestModel.

    NOTE(review): intentionally empty -- presumably present so the test
    suite can exercise the signal wiring; confirm against the tests.
    """
    pass
@receiver(post_update_signal, sender=TestModel)
def post_update_signal_handler(sender, **kwargs):
    """No-op receiver for crudbuilder's ``post_update_signal`` on TestModel.

    NOTE(review): intentionally empty -- presumably present so the test
    suite can exercise the signal wiring; confirm against the tests.
    """
    pass
| {
"content_hash": "f7c6b7f43241dda489d64586f1b09696",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.7887700534759359,
"repo_name": "asifpy/django-crudbuilder",
"id": "676832c52b0ee4248df674e2cbc94f0eecca54c0",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crudbuilder/tests/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "25511"
},
{
"name": "JavaScript",
"bytes": "13215"
},
{
"name": "Python",
"bytes": "68257"
}
],
"symlink_target": ""
} |
import tests.exog.test_random_exogenous as testrandexog

# Generated driver script: runs the shared random-exogenous scenario with
# arguments (32, 20), matching the filename random_exog_32_20.py.
# NOTE(review): the meaning of the two arguments is defined by the helper
# module -- confirm there.
testrandexog.test_random_exogenous( 32,20);
"content_hash": "34a598fef01feab305cb2c4c636c03db",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 55,
"avg_line_length": 25.25,
"alnum_prop": 0.8217821782178217,
"repo_name": "antoinecarme/pyaf",
"id": "bdb923ca406169959023c0afc8e0faf6f54acc54",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/exog/random/random_exog_32_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import locale
from django.template import Library
register = Library()
def numberformat(value):
    """Template filter: format *value* with two decimal places using the
    current locale's numeric conventions.

    Falsy values (None, 0, '') render as the literal string "0".
    """
    if not value:
        return "0"
    # locale.format() was deprecated in Python 3.7 and removed in 3.12;
    # locale.format_string() is the supported equivalent for a single
    # "%" specifier.
    return locale.format_string("%.2f", value)
# Expose the filter to Django templates as {{ value|numberformat }}.
register.filter('numberformat', numberformat)
| {
"content_hash": "a94a400097ba7ac0230e8b75de34b12e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 20.272727272727273,
"alnum_prop": 0.7085201793721974,
"repo_name": "akarambir/shadow",
"id": "935522ea337d2151237f4ac26c11e0f70c95b375",
"size": "223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "expenses/templatetags/numberformat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1723"
},
{
"name": "JavaScript",
"bytes": "693"
},
{
"name": "Python",
"bytes": "49131"
}
],
"symlink_target": ""
} |
import re
from datetime import timedelta
from django import template
from django.conf import settings
from django.template.defaultfilters import time
from django.template.defaultfilters import date as datefilter
from django.utils.formats import get_format
from django.utils.safestring import mark_safe
from icekit.templatetags.icekit_tags import grammatical_join
register = template.Library()
@register.filter
def timesf(times_list, format=None):
    """Format every entry of ``times_list`` with Django's ``time`` filter.

    :param times_list: the list of times to format
    :param format: the format to apply (default = settings.TIME_FORMAT)
    :return: the times, formatted according to the format
    """
    formatted = []
    for entry in times_list:
        formatted.append(time(entry, format))
    return formatted
@register.filter
def times_range(event, format=None):
    """Return display text for an event's start times.

    A hand-written ``human_times`` string wins outright.  Otherwise the
    start times are formatted with :func:`timesf`, "all day" is prepended
    when any occurrence is all-day, and more than three entries collapse
    to "Various times".
    """
    if event.human_times:
        return event.human_times.strip()
    sts = timesf(event.start_times_set(), format=format)
    all_days = [o for o in event.occurrence_list if o.is_all_day]
    if all_days:
        sts = ["all day"] + sts
    if len(sts) > 3:
        return "Various times"
    times = grammatical_join(sts, final_join=", ")
    return times
@register.filter
def add_days(date, days):
    """Template filter: return ``date`` shifted forward by ``days`` days."""
    offset = timedelta(days)
    return date + offset
# Regexes matching Django date-format characters, used by the
# _format_with_same_* helpers to strip components out of a format string.
YEAR_RE = r"\W*(o|y|Y)\W*"  # year markers, plus any surrounding non-word text
MONTH_RE = r"\W*(b|E|F|m|M|n|N|S|t)\W*"  # month markers, plus any surrounding non-word text
def _format_with_same_year(format_specifier):
    """
    Return a version of `format_specifier` that renders a date
    assuming it has the same year as another date. Usually this means
    omitting the year.

    This can be overridden by specifying a format that has `_SAME_YEAR`
    appended to the name in the project's `formats` spec.
    """
    # Probe for a project-level override; get_format returns the name
    # unchanged when no such format is registered.
    override_name = format_specifier + "_SAME_YEAR"
    resolved = get_format(override_name, use_l10n=True)
    if resolved != override_name:
        return resolved
    # No override: strip the year markers out of the base format instead.
    return re.sub(YEAR_RE, '', get_format(format_specifier))
def _format_with_same_year_and_month(format_specifier):
    """
    Return a version of `format_specifier` that renders a date
    assuming it has the same year and month as another date. Usually this
    means omitting the year and month.

    This can be overridden by specifying a format that has
    `_SAME_YEAR_SAME_MONTH` appended to the name in the project's `formats`
    spec.
    """
    # Probe for a project-level override; get_format returns the name
    # unchanged when no such format is registered.
    override_name = format_specifier + "_SAME_YEAR_SAME_MONTH"
    resolved = get_format(override_name, use_l10n=True)
    if resolved != override_name:
        return resolved
    # No override: strip year markers, then month markers, from the base.
    without_year = re.sub(YEAR_RE, '', get_format(format_specifier))
    return re.sub(MONTH_RE, '', without_year)
@register.filter(is_safe=True)
def dates_range(event, format=""):
    """
    :param event: An Event
    :param format: A |-separated string specifying:
        date_format - a format specifier
        separator - the string to join the start and end dates with, if they're
        different
        no_dates_text - text to return if the event has no occurrences,
        default ''
        from_text - text to prepend if the event never ends (the 'last' date is
        None)
        ended_text - text to append if the event has ended
    :return: text describing the date range for the event. If human dates are
    given, use that, otherwise, use the first and last occurrences for an event.

    If the first and last dates have year or year and month in common,
    the format string for the first date is modified to exclude those items.

    If the first and last dates are equal, only the first date is used (ie with
    no range)

    You can override this behaviour by specifying additional formats with
    "_SAME_YEAR" and "_SAME_YEAR_SAME_MONTH" appended to the name.
    """
    # TODO: factor out a more general filter that accepts 1-2 dates and
    # renders the range.

    # resolve arguments
    date_format = settings.DATE_FORMAT  # Django's default
    separator = " – "
    no_dates_text = ''
    from_text = "from "
    ended_text = " (ended)"
    # NOTE(review): split('|') always yields at least one element, so with
    # the default format="" date_format becomes '' here (the date filter
    # then falls back to its own default).
    arg_list = [arg.strip() for arg in format.split('|')]
    if arg_list:
        date_format = arg_list[0]
        try:
            separator = arg_list[1]
            no_dates_text = arg_list[2]
            from_text = arg_list[3]
            ended_text = arg_list[4]
        except IndexError:
            pass

    # Suffix appended to every rendered variant when the event is over.
    if event.has_finished():
        f = ended_text
    else:
        f = ""

    if event.human_dates:
        return event.human_dates.strip() + f

    # Get the dates from the occurrence
    first, last = event.get_occurrences_range()
    start, end = None, None
    if first:
        start = first.local_start
    if last:
        end = last.local_end

    # figure out to what extent the dates differ
    if start and end:
        first_date_format = get_format(date_format, use_l10n=True)
        if start.year == end.year:
            # use a first_date_format without the year
            first_date_format = _format_with_same_year(date_format)
            if start.month == end.month:
                # remove month spec from first_date_format
                first_date_format = _format_with_same_year_and_month(date_format)
                if start.day == end.day:
                    # the two dates are equal, just return one date.
                    return mark_safe(datefilter(start, date_format) + f)
        return mark_safe('%s%s%s' % (
            datefilter(start, first_date_format),
            separator,
            datefilter(end, date_format)
        ) + f)
    elif start and not end:
        # Open-ended event: only a start date exists.
        return '%s%s' % (from_text, datefilter(start, date_format)) + f
    elif not (start or end):
        return no_dates_text
    else:
        raise AssertionError("Got a date range that has a last date but no first date")
| {
"content_hash": "035e95014977572e9980ff59e20d53fd",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 91,
"avg_line_length": 36.345029239766085,
"alnum_prop": 0.6466613032984715,
"repo_name": "ic-labs/icekit-events",
"id": "880e85fb170980272906c5c685d80c91461106e2",
"size": "6215",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit_events/templatetags/events_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1334"
},
{
"name": "HTML",
"bytes": "19090"
},
{
"name": "JavaScript",
"bytes": "1759"
},
{
"name": "Python",
"bytes": "208757"
}
],
"symlink_target": ""
} |
"""Offer numeric state listening automation rules."""
import logging
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.const import (
CONF_ABOVE,
CONF_ATTRIBUTE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, callback
from homeassistant.helpers import (
condition,
config_validation as cv,
entity_registry as er,
template,
)
from homeassistant.helpers.event import (
async_track_same_state,
async_track_state_change_event,
)
from homeassistant.helpers.typing import ConfigType
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
def validate_above_below(value):
    """Validate that above and below can co-exist."""
    lower_bound = value.get(CONF_ABOVE)
    upper_bound = value.get(CONF_BELOW)

    # Nothing to cross-check unless both thresholds are present as numbers;
    # string values are resolved at runtime, not at config-validation time.
    if lower_bound is None or upper_bound is None:
        return value
    if isinstance(lower_bound, str) or isinstance(upper_bound, str):
        return value

    if lower_bound > upper_bound:
        raise vol.Invalid(
            f"A value can never be above {lower_bound} and below {upper_bound} at the same time. You probably want two different triggers.",
        )

    return value
# Trigger schema: requires at least one of `above`/`below`, and rejects
# impossible constant ranges via validate_above_below.
_TRIGGER_SCHEMA = vol.All(
    cv.TRIGGER_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_PLATFORM): "numeric_state",
            vol.Required(CONF_ENTITY_ID): cv.entity_ids_or_uuids,
            vol.Optional(CONF_BELOW): cv.NUMERIC_STATE_THRESHOLD_SCHEMA,
            vol.Optional(CONF_ABOVE): cv.NUMERIC_STATE_THRESHOLD_SCHEMA,
            vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_FOR): cv.positive_time_period_template,
            vol.Optional(CONF_ATTRIBUTE): cv.match_all,
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
    validate_above_below,
)

_LOGGER = logging.getLogger(__name__)
async def async_validate_trigger_config(
    hass: HomeAssistant, config: ConfigType
) -> ConfigType:
    """Validate trigger config."""
    validated = _TRIGGER_SCHEMA(config)

    # Resolve any registry entry ids (uuids) in the entity list to real
    # entity ids before the trigger is attached.
    entity_registry = er.async_get(hass)
    raw_ids = cv.entity_ids_or_uuids(validated[CONF_ENTITY_ID])
    validated[CONF_ENTITY_ID] = er.async_validate_entity_ids(
        entity_registry, raw_ids
    )
    return validated
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: AutomationTriggerInfo,
    *,
    platform_type: str = "numeric_state",
) -> CALLBACK_TYPE:
    """Listen for state changes based on configuration.

    An entity is "armed" while it is outside the configured numeric range;
    the action fires only on a transition from armed to matching, so a
    state that stays inside the range does not re-trigger.
    """
    entity_ids: list[str] = config[CONF_ENTITY_ID]
    below = config.get(CONF_BELOW)
    above = config.get(CONF_ABOVE)
    time_delta = config.get(CONF_FOR)
    template.attach(hass, time_delta)
    value_template = config.get(CONF_VALUE_TEMPLATE)
    unsub_track_same = {}
    armed_entities = set()
    period: dict = {}
    attribute = config.get(CONF_ATTRIBUTE)
    job = HassJob(action)

    trigger_data = automation_info["trigger_data"]
    _variables = automation_info["variables"] or {}

    if value_template is not None:
        value_template.hass = hass

    def variables(entity_id):
        """Return a dict with trigger variables."""
        trigger_info = {
            "trigger": {
                "platform": "numeric_state",
                "entity_id": entity_id,
                "below": below,
                "above": above,
                "attribute": attribute,
            }
        }
        return {**_variables, **trigger_info}

    @callback
    def check_numeric_state(entity_id, from_s, to_s):
        """Return whether the criteria are met, raise ConditionError if unknown."""
        return condition.async_numeric_state(
            hass, to_s, below, above, value_template, variables(entity_id), attribute
        )

    # Arm every entity that starts out NOT matching the range, so the first
    # transition into the range fires.  The entity id is passed as the state
    # here; the condition helper resolves it.
    for entity_id in entity_ids:
        try:
            if not check_numeric_state(entity_id, None, entity_id):
                armed_entities.add(entity_id)
        except exceptions.ConditionError as ex:
            _LOGGER.warning(
                "Error initializing '%s' trigger: %s",
                automation_info["name"],
                ex,
            )

    @callback
    def state_automation_listener(event):
        """Listen for state changes and calls action."""
        entity_id = event.data.get("entity_id")
        from_s = event.data.get("old_state")
        to_s = event.data.get("new_state")

        @callback
        def call_action():
            """Call action with right context."""
            hass.async_run_hass_job(
                job,
                {
                    "trigger": {
                        **trigger_data,
                        "platform": platform_type,
                        "entity_id": entity_id,
                        "below": below,
                        "above": above,
                        "from_state": from_s,
                        "to_state": to_s,
                        "for": time_delta if not time_delta else period[entity_id],
                        "description": f"numeric state of {entity_id}",
                    }
                },
                to_s.context,
            )

        @callback
        def check_numeric_state_no_raise(entity_id, from_s, to_s):
            """Return True if the criteria are now met, False otherwise."""
            try:
                return check_numeric_state(entity_id, from_s, to_s)
            except exceptions.ConditionError:
                # This is an internal same-state listener so we just drop the
                # error. The same error will be reached and logged by the
                # primary async_track_state_change_event() listener.
                return False

        try:
            matching = check_numeric_state(entity_id, from_s, to_s)
        except exceptions.ConditionError as ex:
            _LOGGER.warning("Error in '%s' trigger: %s", automation_info["name"], ex)
            return

        if not matching:
            # Left the range (or never matched): re-arm for the next entry.
            armed_entities.add(entity_id)
        elif entity_id in armed_entities:
            # Armed entity entered the range: disarm and fire (possibly
            # after the configured `for:` hold period).
            armed_entities.discard(entity_id)

            if time_delta:
                try:
                    period[entity_id] = cv.positive_time_period(
                        template.render_complex(time_delta, variables(entity_id))
                    )
                except (exceptions.TemplateError, vol.Invalid) as ex:
                    _LOGGER.error(
                        "Error rendering '%s' for template: %s",
                        automation_info["name"],
                        ex,
                    )
                    return

                unsub_track_same[entity_id] = async_track_same_state(
                    hass,
                    period[entity_id],
                    call_action,
                    entity_ids=entity_id,
                    async_check_same_func=check_numeric_state_no_raise,
                )
            else:
                call_action()

    unsub = async_track_state_change_event(hass, entity_ids, state_automation_listener)

    @callback
    def async_remove():
        """Remove state listeners async."""
        unsub()
        for async_remove in unsub_track_same.values():
            async_remove()
        unsub_track_same.clear()

    return async_remove
| {
"content_hash": "18e6f2f06b71f28b564af0a269c8225b",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 128,
"avg_line_length": 32.838427947598255,
"alnum_prop": 0.5715425531914894,
"repo_name": "toddeye/home-assistant",
"id": "934cc99993a6bfa32b97d9830efc5bd5fafba3e6",
"size": "7520",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homeassistant/triggers/numeric_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider
# Parsl configuration for NCSA Blue Waters: one HighThroughputExecutor whose
# workers run on Torque-managed compute nodes, launched through aprun.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="bw_htex",
            cores_per_worker=1,
            worker_debug=False,
            provider=TorqueProvider(
                queue='normal',
                # Blue Waters requires tasks to run inside the bwpy container
                # environment, hence the "-b -- bwpy-environ --" aprun override.
                launcher=AprunLauncher(overrides="-b -- bwpy-environ --"),
                scheduler_options='',  # string to prepend to the #PBS directives in the submit script (Torque/PBS scheduler, not Slurm, so not #SBATCH)
                worker_init='',  # command to run before starting a worker, such as 'source activate env'
                init_blocks=1,
                max_blocks=1,
                min_blocks=1,
                nodes_per_block=2,
                walltime='00:10:00'
            ),
        )
    ],
)
| {
"content_hash": "eb2925068c45e7bcb5b09c26ff1e4928",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 114,
"avg_line_length": 31.821428571428573,
"alnum_prop": 0.5712682379349046,
"repo_name": "Parsl/parsl",
"id": "38ae9cbad8012d7a5221ea86510f058f3865c65a",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/configs/bluewaters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1263"
},
{
"name": "CSS",
"bytes": "337"
},
{
"name": "HTML",
"bytes": "12706"
},
{
"name": "Makefile",
"bytes": "4908"
},
{
"name": "Python",
"bytes": "1173869"
},
{
"name": "Shell",
"bytes": "12057"
}
],
"symlink_target": ""
} |
##
# Import Modules
#
from distutils.core import setup, Extension
import os
# Fail fast with a real exception when the build environment is incomplete.
# The original raised a bare string, which has been a TypeError since
# Python 2.6 (string exceptions were removed), hiding the intended message.
if 'BASE_TOOLS_PATH' not in os.environ:
    raise RuntimeError("Please define BASE_TOOLS_PATH to the root of base tools tree")
BaseToolsDir = os.environ['BASE_TOOLS_PATH']
# Build the PyUtility C extension against the BaseTools C headers/sources.
setup(
    name="PyUtility",
    version="0.01",
    ext_modules=[
        Extension(
            'PyUtility',
            sources=[
                'PyUtility.c'
            ],
            include_dirs=[
                os.path.join(BaseToolsDir, 'Source', 'C', 'Include'),
                os.path.join(BaseToolsDir, 'Source', 'C', 'Include', 'Ia32'),
                os.path.join(BaseToolsDir, 'Source', 'C', 'Common')
            ],
        )
    ],
)
| {
"content_hash": "44e2e695adfdbaa2906f9fde575b16ae",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 25.517241379310345,
"alnum_prop": 0.5,
"repo_name": "tianocore/buildtools-BaseTools",
"id": "e4d407d84f5e0609be99e0da47c9e497f3f82abc",
"size": "1307",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Source/C/PyUtility/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2774879"
},
{
"name": "C++",
"bytes": "516623"
},
{
"name": "GAP",
"bytes": "374642"
},
{
"name": "Objective-C",
"bytes": "106673"
},
{
"name": "Python",
"bytes": "5675290"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Shell",
"bytes": "49564"
}
],
"symlink_target": ""
} |
from corehq.apps.fixtures.resources.v0_1 import InternalFixtureResource
from corehq.apps.locations.resources.v0_1 import InternalLocationResource
from .utils import APIResourceTest
class InternalTestMixin(object):
    """Shared assertions for internal, session-accessible API resources."""
    def assert_accessible_via_sessions(self, url):
        """Assert *url* returns 200 under both API-key auth and session auth."""
        # API-key auth should succeed.
        auth_headers = self._get_api_key_auth_headers()
        api_response = self.client.get(url, **auth_headers)
        self.assertEqual(api_response.status_code, 200)
        # Session auth should also succeed, since these resources are used
        # internally over sessions.
        self.client.login(username=self.username, password=self.password)
        session_response = self.client.get(url)
        self.assertEqual(session_response.status_code, 200)
class InternalFixtureResourceTest(APIResourceTest, InternalTestMixin):
    """Smoke-test that the internal fixture API allows API-key and session auth."""
    resource = InternalFixtureResource
    api_name = 'v0_5'
    def test_basic(self):
        """The list endpoint must be reachable via both auth mechanisms."""
        self.assert_accessible_via_sessions(self.list_endpoint)
class InternalLocationResourceTest(APIResourceTest, InternalTestMixin):
    """Smoke-test that the internal location API allows API-key and session auth."""
    resource = InternalLocationResource
    api_name = 'v0_5'
    def test_basic(self):
        """The list endpoint must be reachable via both auth mechanisms."""
        self.assert_accessible_via_sessions(self.list_endpoint)
| {
"content_hash": "573d68d58675c359ffca9eae3591e1d5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 88,
"avg_line_length": 36.34375,
"alnum_prop": 0.7403267411865864,
"repo_name": "dimagi/commcare-hq",
"id": "8c06c8f989de1cec4c33e9b06e295404a4b0256c",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/api/tests/internal_resources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from nrrd import NrrdHeader
from nrrd import NrrdReader
from nrrd import NrrdWriter
from utils import nifti_to_nrrd
from utils import nrrd_to_nifti
| {
"content_hash": "ab4692978f99964b9063d2ca8e0675f0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.8322147651006712,
"repo_name": "sinkpoint/pynrrd",
"id": "24c7e188a7861be555038690a945dc6b1c1f6afd",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynrrd/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16643"
}
],
"symlink_target": ""
} |
"""view for aloha editor"""
from django.shortcuts import render_to_response
from django.template import RequestContext
from djaloha import settings
from djaloha.utils import get_model
def aloha_init(request):
    """
    Build the javascript file which is initializing the aloha-editor
    Run the javascript code for the AlohaInput widget
    """
    # Collect every instance of each configured "linkable" model so the
    # editor can offer them as link targets.
    links = []
    for dotted_name in settings.link_models():
        app_label, model_name = dotted_name.split('.')
        model_class = get_model(app_label, model_name)
        if model_class:
            links.extend(model_class.objects.all())
    template_context = {
        'links': links,
        'config': {
            'jquery_no_conflict': settings.jquery_no_conflict(),
            'sidebar_disabled': 'true' if settings.sidebar_disabled() else 'false',
            'css_classes': settings.css_classes(),
            'resize_disabled': settings.resize_disabled(),
        },
    }
    # Served as javascript: this view renders the editor bootstrap script.
    return render_to_response(
        settings.init_js_template(),
        template_context,
        content_type='text/javascript',
        context_instance=RequestContext(request)
    )
| {
"content_hash": "c45b15221b055efe58c6be27a0d7ed86",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 87,
"avg_line_length": 31.11111111111111,
"alnum_prop": 0.6098214285714286,
"repo_name": "ljean/djaloha",
"id": "cd5907d633d60256191181232582a1e33099ae3e",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djaloha/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "290231"
},
{
"name": "HTML",
"bytes": "241654"
},
{
"name": "JavaScript",
"bytes": "13001978"
},
{
"name": "PHP",
"bytes": "49544"
},
{
"name": "Python",
"bytes": "25150"
}
],
"symlink_target": ""
} |
import eventlet
import errno
import imp
import os
import os.path
import sys
from daemon.daemon import DaemonContext
from daemon.runner import DaemonRunner, make_pidlockfile
from django.conf import settings as django_settings
from django.core.management import call_command
from eventlet import wsgi
from optparse import OptionParser
from sentry import VERSION
def settings_from_file(filename, silent=False):
    """
    Configures django settings from an arbitrary (non sys.path) filename.

    Executes the file into a fresh 'config' module and copies every
    UPPER_CASE name onto django.conf.settings. Returns False when silent
    and the file is missing/a directory; re-raises other IOErrors.
    (Python 2 only: uses execfile and 'except E, e' syntax.)
    """
    mod = imp.new_module('config')
    mod.__file__ = filename
    try:
        execfile(filename, mod.__dict__)
    except IOError, e:
        if silent and e.errno in (errno.ENOENT, errno.EISDIR):
            return False
        e.strerror = 'Unable to load configuration file (%s)' % e.strerror
        raise
    # Settings that Django expects to be tuples even if the user wrote a str.
    tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
    if not django_settings.configured:
        django_settings.configure()
    for setting in dir(mod):
        # Only ALL-CAPS names are treated as settings, per Django convention.
        if setting == setting.upper():
            setting_value = getattr(mod, setting)
            if setting in tuple_settings and type(setting_value) == str:
                setting_value = (setting_value,) # In case the user forgot the comma.
            setattr(django_settings, setting, setting_value)
class SentryServer(DaemonRunner):
    """Daemon runner that serves the Sentry WSGI app over eventlet.

    Wraps python-daemon's DaemonRunner: stdout/stderr are redirected to the
    web log file and a pidlockfile guards against double starts.
    """
    # Seconds to wait when acquiring the pid lockfile.
    pidfile_timeout = 10
    start_message = u"started with pid %(pid)d"
    def __init__(self, host=None, port=None, pidfile=None,
                logfile=None, daemonize=False, debug=False):
        from sentry.conf import settings
        if not logfile:
            logfile = settings.WEB_LOG_FILE
        logfile = os.path.realpath(logfile)
        pidfile = os.path.realpath(pidfile or settings.WEB_PID_FILE)
        if daemonize:
            detach_process = True
        else:
            detach_process = False
        self.daemon_context = DaemonContext(detach_process=detach_process)
        self.daemon_context.stdout = open(logfile, 'w+')
        # stderr is unbuffered so tracebacks hit the log immediately.
        self.daemon_context.stderr = open(logfile, 'w+', buffering=0)
        self.debug = debug
        self.pidfile = make_pidlockfile(pidfile, self.pidfile_timeout)
        self.daemon_context.pidfile = self.pidfile
        self.host = host or settings.WEB_HOST
        self.port = port or settings.WEB_PORT
        # HACK: set app to self so self.app.run() works
        self.app = self
    def execute(self, action):
        """Run 'start'/'stop'/'restart'; foreground 'start' skips DaemonRunner."""
        self.action = action
        # Upgrade needs to happen before forking
        upgrade()
        if self.daemon_context.detach_process is False and self.action == 'start':
            # HACK:
            self.run()
        else:
            self.do_action()
    def run(self):
        """Serve the WSGI application (with autoreload when debug is set)."""
        from sentry.wsgi import application
        def inner_run():
            wsgi.server(eventlet.listen((self.host, self.port)), application)
        if self.debug:
            from django.utils import autoreload
            autoreload.main(inner_run)
        else:
            inner_run()
def cleanup(days=30, logger=None, site=None, server=None):
from sentry.models import GroupedMessage, Message
from sentry.utils.query import RangeQuerySetWrapper, SkinnyQuerySet
import datetime
ts = datetime.datetime.now() - datetime.timedelta(days=days)
qs = SkinnyQuerySet(Message).filter(datetime__lte=ts)
if logger:
qs.filter(logger=logger)
if site:
qs.filter(site=site)
if server:
qs.filter(server_name=server)
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# TODO: we should collect which messages above were deleted
# and potentially just send out post_delete signals where
# GroupedMessage can update itself accordingly
qs = SkinnyQuerySet(GroupedMessage).filter(last_seen__lte=ts)
if logger:
qs.filter(logger=logger)
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
def upgrade(interactive=True):
    """Sync (and, when south is installed, migrate) the Sentry database schema."""
    from sentry.conf import settings
    call_command('syncdb', database=settings.DATABASE_USING or 'default', interactive=interactive)
    # South migrations only apply when the host project has south installed.
    if 'south' in django_settings.INSTALLED_APPS:
        call_command('migrate', database=settings.DATABASE_USING or 'default', interactive=interactive)
def main():
command_list = ('start', 'stop', 'restart', 'cleanup', 'upgrade')
args = sys.argv
if len(args) < 2 or args[1] not in command_list:
print "usage: sentry [command] [options]"
print
print "Available subcommands:"
for cmd in command_list:
print " ", cmd
sys.exit(1)
parser = OptionParser(version="%%prog %s" % VERSION)
parser.add_option('--config', metavar='CONFIG')
if args[1] == 'start':
parser.add_option('--host', metavar='HOSTNAME')
parser.add_option('--port', type=int, metavar='PORT')
parser.add_option('--daemon', action='store_true', default=False, dest='daemonize')
parser.add_option('--no-daemon', action='store_false', default=False, dest='daemonize')
parser.add_option('--debug', action='store_true', default=False, dest='debug')
parser.add_option('--pidfile', dest='pidfile')
parser.add_option('--logfile', dest='logfile')
elif args[1] == 'stop':
parser.add_option('--pidfile', dest='pidfile')
parser.add_option('--logfile', dest='logfile')
elif args[1] == 'cleanup':
parser.add_option('--days', default='30', type=int,
help='Numbers of days to truncate on.')
parser.add_option('--logger',
help='Limit truncation to only entries from logger.')
parser.add_option('--site',
help='Limit truncation to only entries from site.')
parser.add_option('--server',
help='Limit truncation to only entries from server.')
(options, args) = parser.parse_args()
# Install default server values
if not django_settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
if options.config:
# assumed to be a file
settings_from_file(options.config)
else:
config_path = os.path.expanduser(os.path.join('~', '.sentry', 'sentry.conf.py'))
if os.path.exists(config_path):
settings_from_file(config_path)
if getattr(options, 'debug', False):
django_settings.DEBUG = True
if args[0] == 'upgrade':
upgrade()
elif args[0] == 'start':
app = SentryServer(host=options.host, port=options.port,
pidfile=options.pidfile, logfile=options.logfile,
daemonize=options.daemonize, debug=options.debug)
app.execute(args[0])
elif args[0] == 'restart':
app = SentryServer()
app.execute(args[0])
elif args[0] == 'stop':
app = SentryServer(pidfile=options.pidfile, logfile=options.logfile)
app.execute(args[0])
elif args[0] == 'cleanup':
cleanup(days=options.days, logger=options.logger, site=options.site, server=options.server)
sys.exit(0)
if __name__ == '__main__':
main() | {
"content_hash": "af6a4f68632cb934067fe1a4b241cb49",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 103,
"avg_line_length": 34.38028169014085,
"alnum_prop": 0.6180527106377168,
"repo_name": "WoLpH/django-sentry",
"id": "6eadf7b4ac527f95f0bd78a7f11dc708e04e80d2",
"size": "7345",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sentry/scripts/runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "11465"
},
{
"name": "Python",
"bytes": "283787"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
"""
Settings for testing
"""
NAMESPACE = "percept" | {
"content_hash": "d52b8bea66c040f0139d9e3991285d0f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 21,
"avg_line_length": 10.2,
"alnum_prop": 0.6666666666666666,
"repo_name": "VikParuchuri/percept",
"id": "3ea69e1e0b5db6f85f5117df298cbccd0db66399",
"size": "51",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "percept/tests/test_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81776"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
'''
Created on August 21, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.light.light import LightDevice
class LevitonDecoraLightswitchDevice(LightDevice):
    """Leviton Decora in-wall switch lighting device.

    Controls the switch through the 'outletStatus' command and tracks its
    on/off state via the 'state' measurement; supports saving and later
    restoring the current state.
    """
    # Measurement Names
    MEASUREMENT_NAME_STATUS = 'state'
    # Command Names
    COMMAND_NAME_STATUS = 'outletStatus'
    # List of Device Types this class is compatible with
    DEVICE_TYPES = [9001]
    def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
        """Initialize the device and the save/restore bookkeeping attributes."""
        LightDevice.__init__(self, botengine, device_id, device_type, device_description, precache_measurements=precache_measurements)
        # hasattr() guards keep previously persisted state intact when this
        # object is re-instantiated with attributes already present.
        if not hasattr(self, "saved_state"):
            # Last saved on/off state (True = on)
            self.saved_state = False
        if not hasattr(self, "saved"):
            # Whether a state snapshot is currently stored
            self.saved = False
    #===========================================================================
    # Attributes
    #===========================================================================
    def get_device_type_name(self, language):
        """
        :return: the name of this device type in the given language, for example, "Entry Sensor"
        """
        # NOTE: Device type name
        return _("Leviton Decora In-Wall Switch")
    def get_image_name(self, botengine):
        """
        :return: the font icon name of this device type
        """
        return "switch"
    def is_command(self, measurement_name):
        """
        :param measurement_name: Name of a local measurement name
        :return: True if the given parameter name is a command
        """
        return measurement_name == self.COMMAND_NAME_STATUS
    def is_light(self):
        """
        :return: True if this is a light
        """
        return True
    #===========================================================================
    # Commands
    #===========================================================================
    def save(self, botengine):
        """Save the current on/off status of this device.

        :return: True if the state was captured; False if the device is not
            connected or cannot be controlled.
        """
        if not self.is_connected or not self.can_control:
            return False
        try:
            self.saved_state = (int(self.measurements[self.MEASUREMENT_NAME_STATUS][0][0]) == 1)
        except (KeyError, IndexError, ValueError, TypeError):
            # No usable 'state' measurement (missing key, empty list, or a
            # non-numeric value): default to "off". The original used a bare
            # except:, which would also have swallowed SystemExit and
            # KeyboardInterrupt.
            self.saved_state = False
        #botengine.get_logger().info("Light Switch [" + str(self.device_id) + "] saved state = " + str(self.saved_state))
        self.saved = True
        return True
    def restore(self, botengine):
        """
        Restore the status of the device from the save point
        :return: True if the lights were restored, False if there was nothing to restore
        """
        #botengine.get_logger().info("GE Light Switch: Restore")
        if not self.is_connected or not self.can_control:
            #botengine.get_logger().info("\t=> Can't control or not connected")
            return False
        #botengine.get_logger().info("\t>restore(" + str(self.device_id) + ")")
        if not self.saved:
            botengine.get_logger().info("\t<restore() : Nothing to restore")
            return False
        # Consume the snapshot so a second restore() without save() is a no-op.
        self.saved = False
        if self.saved_state:
            self.on(botengine)
        else:
            self.off(botengine)
        #self.log("<restore() : Restored")
        return True
    def raw_command(self, botengine, name, value):
        """
        Send a command for the given local measurement name
        """
        if name == self.COMMAND_NAME_STATUS:
            if value:
                self.on(botengine)
            else:
                self.off(botengine)
    def on(self, botengine):
        """Turn on. :return: True if the command was sent."""
        if not self.is_connected or not self.can_control:
            return False
        botengine.get_logger().info(">on(lightswitch " + str(self.device_id) + ")")
        botengine.send_command(self.device_id, self.COMMAND_NAME_STATUS, "1")
        return True
    def off(self, botengine):
        """Turn off. :return: True if the command was sent."""
        if not self.is_connected or not self.can_control:
            return False
        botengine.get_logger().info(">off(lightswitch " + str(self.device_id) + ")")
        botengine.send_command(self.device_id, self.COMMAND_NAME_STATUS, "0")
        return True
| {
"content_hash": "baae768c6119c9a0c2617aa578fe5405",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 134,
"avg_line_length": 33.28888888888889,
"alnum_prop": 0.5356030262572319,
"repo_name": "peoplepower/composer-sdk-python",
"id": "34295fb2b47f7784285348873f224e5a09f3bb5b",
"size": "4494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com.ppc.Bot/devices/light/lightswitch_leviton_decora.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "170609"
},
{
"name": "Shell",
"bytes": "22591"
}
],
"symlink_target": ""
} |
import pkg_resources
import pylab
import numpy as np
import sys
import tables
import motmot.fview_ext_trig.easy_decode as easy_decode
import matplotlib.ticker as mticker
from optparse import OptionParser
import pytz, datetime, time
pacific = pytz.timezone('US/Pacific')
import scipy.io
def doit(fname,options):
fname = sys.argv[1]
h5 = tables.openFile(fname,mode='r')
stroke_data=h5.root.stroke_data[:]
stroke_times = stroke_data['trigger_timestamp']
time_data=h5.root.time_data[:]
gain,offset,resids = easy_decode.get_gain_offset_resids(
input=time_data['framestamp'],
output=time_data['timestamp'])
top = h5.root.time_data.attrs.top
wordstream = h5.root.ain_wordstream[:]
wordstream = wordstream['word'] # extract into normal numpy array
r=easy_decode.easy_decode(wordstream,gain,offset,top)
if r is not None:
chans = r.dtype.fields.keys()
chans.sort()
chans.remove('timestamps')
if 0:
Vcc = h5.root.ain_wordstream.attrs.Vcc
print 'Vcc read from file at',Vcc
else:
Vcc=3.3
print 'Vcc',Vcc
ADCmax = (2**10)-1
analog_gain = Vcc/ADCmax
else:
chans = []
names = h5.root.ain_wordstream.attrs.channel_names
if r is not None:
dt = r['timestamps'][1]-r['timestamps'][0]
samps_per_sec = 1.0/dt
adc_duration = n_adc_samples*dt
print '%d samples at %.1f samples/sec = %.1f seconds'%(n_adc_samples,
samps_per_sec,
adc_duration)
t0 = r['timestamps'][0]
stroke_times_zero_offset = stroke_times-t0
if len(stroke_times_zero_offset):
stroke_data_duration = stroke_times_zero_offset[-1]
total_duration = max(stroke_data_duration,adc_duration)
else:
t0 = 0
N_subplots = len(chans)+5
ax=None
for i in range(N_subplots):
ax = pylab.subplot(N_subplots,1,i+1,sharex=ax)
if i < len(chans):
try:
label = names[int(chans[i])]
except Exception, err:
print 'ERROR: ingnoring exception %s'%(err,)
label = 'channel %s'%chans[i]
ax.plot(r['timestamps']-t_offset,r[chans[i]]*analog_gain,
label=label)
ax.set_ylabel('V')
ax.legend()
elif i == len(chans):
if np.all(np.isnan(stroke_data['right'])):
continue
ax.set_ylabel('R (degrees)')
ax.legend()
elif i == len(chans)+1:
if np.all(np.isnan(stroke_data['left'])):
continue
ax.set_ylabel('L (degrees)')
ax.legend()
elif i == len(chans)+2:
if np.all(np.isnan(stroke_data['left_antenna'])):
continue
ax.plot(stroke_times-t0,stroke_data['left_antenna'],label='Lant')
ax.set_ylabel('L antenna (degrees)')
ax.legend()
elif i == len(chans)+3:
if np.all(np.isnan(stroke_data['right_antenna'])):
continue
ax.plot(stroke_times-t0,stroke_data['right_antenna'],label='Rant')
ax.set_ylabel('R antenna (degrees)')
ax.legend()
elif i == len(chans)+4:
if np.all(np.isnan(stroke_data['head'])):
continue
ax.plot(stroke_times-t0,stroke_data['head'],label='H')
ax.set_ylabel('head (degrees)')
ax.legend()
ax.xaxis.set_major_formatter(mticker.FormatStrFormatter("%s"))
ax.yaxis.set_major_formatter(mticker.FormatStrFormatter("%s"))
ax.set_xlabel('Time (sec)')
ax.set_xlim((t_plot_start,t_plot_start+total_duration))
if options.timestamps:
pylab.gcf().autofmt_xdate()
pylab.show()
def main():
    """Parse command-line options and plot the raw timeseries from FILE."""
    usage = '%prog [options] FILE'
    parser = OptionParser(usage)
    parser.add_option("--timestamps", action='store_true',
                      default=False)
    (options, args) = parser.parse_args()
    # Give a clean usage error instead of an IndexError when FILE is missing.
    if not args:
        parser.error('a FILE argument is required')
    fname = args[0]
    doit(fname,options)
if __name__=='__main__':
main()
| {
"content_hash": "c90f364831e0b61a0f7c5ef5a9da1f06",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 78,
"avg_line_length": 31.984732824427482,
"alnum_prop": 0.556563245823389,
"repo_name": "motmot/strokelitude",
"id": "5e530708a95b44461caea06b9d3b36300a0b57e9",
"size": "4190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_raw_timeseries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "106754"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from PyPDF2 import PdfFileWriter, PdfFileReader
import sys
import shutil
import time
start = time.time()
# Exit after printing usage instead of falling through and crashing with an
# IndexError on sys.argv[1] (the original only printed the message).
if len(sys.argv) < 2:
    print("usage: python xxx.py dir_of_pdf_files")
    sys.exit(1)
pdf_dir = sys.argv[1]
import os
def get_pdf_title(pdf_file_path):
    """Return the /Title entry of the PDF's document info, or None if absent."""
    # must be open as 'rb', otherwise will raise "PdfReadError: EOF marker not found"
    with open(pdf_file_path, 'rb') as handle:
        info = PdfFileReader(handle).getDocumentInfo()
        # print(pdf_file_path)
        # print(info)
        if '/Title' not in info.keys():
            return None
        return info['/Title']
filenames = os.listdir(pdf_dir)
# Characters that are invalid in Windows filenames; stripped from titles.
illegal_chars = ['\\','/',':','*','?','"','<','>','|']
# to change cvpr paper names to "16 cvpr <title>"
for fn in filenames:
    if '.pdf' in fn:
        # Skip files that were already renamed on a previous run.
        if not "16 cvpr" in fn:
            title = get_pdf_title(pdf_dir+'/'+fn)
            # PDFs without a /Title metadata entry are left untouched.
            if title is None:
                continue
            # res_name = "16 cvpr "+title+'.pdf'
            for c in illegal_chars:
                title = title.replace(c,"")
            res_name = "16 cvpr "+title+'.pdf'
            # print(res_name)
            shutil.move(pdf_dir+'/'+fn, pdf_dir + '/'+ res_name)
            print("{} ---> {}".format(fn, res_name))
print('time cost: {}'.format(time.time() - start))
| {
"content_hash": "40acb684893a791e0f28277d2d816fd1",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 32.97674418604651,
"alnum_prop": 0.5479548660084627,
"repo_name": "wangg12/conference_papers_download_tool",
"id": "505ac99bcac9a2cb97c62248f7b45e14b410ad07",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "change_pdf_name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1525941"
},
{
"name": "Python",
"bytes": "6376"
}
],
"symlink_target": ""
} |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This script generates the packer_bbox.c file from gl_header.parsed
import sys
import cPickle
import string
# The shared GL API parser lives next to this tree; make it importable.
sys.path.append( "../glapi_parser" )
import apiutil
# Emit the generated file's copyright banner to stdout (the build redirects
# this script's stdout into packer_bbox.c).
apiutil.CopyrightC()
# Static C prologue of the generated file: includes plus the two hand-written
# bounding-box reset/query helpers. This string is runtime output - do not edit
# its contents without updating the generated packer_bbox.c expectations.
print """
/* DO NOT EDIT - THIS FILE GENERATED BY THE packer_bbox.py SCRIPT */
/* These functions pack glVertex functions and also update the bounding box
 * if the cr_packer_globals.updateBBOX variable is non-zero.
 */
#include "packer.h"
#include "cr_opcodes.h"
#include "pack_bbox.h"
#include <float.h>
/**
 * Reset packer bounding box to empty state.
 */
void crPackResetBoundingBox( CRPackContext *pc )
{
	pc->bounds_min.x = FLT_MAX;
	pc->bounds_min.y = FLT_MAX;
	pc->bounds_min.z = FLT_MAX;
	pc->bounds_max.x = -FLT_MAX;
	pc->bounds_max.y = -FLT_MAX;
	pc->bounds_max.z = -FLT_MAX;
	pc->updateBBOX = 1;
}
/**
 * Query current bounding box.
 * \\return GL_TRUE if non-empty box, GL_FALSE if empty box.
 */
GLboolean crPackGetBoundingBox( CRPackContext *pc,
				GLfloat *xmin, GLfloat *ymin, GLfloat *zmin,
				GLfloat *xmax, GLfloat *ymax, GLfloat *zmax)
{
	if (pc->bounds_min.x != FLT_MAX) {
		*xmin = pc->bounds_min.x;
		*ymin = pc->bounds_min.y;
		*zmin = pc->bounds_min.z;
		*xmax = pc->bounds_max.x;
		*ymax = pc->bounds_max.y;
		*zmax = pc->bounds_max.z;
		return GL_TRUE;
	}
	else {
		return GL_FALSE;
	}
}
"""
def WriteData( offset, arg_type, arg_name, is_swapped ):
	"""Return the C statement that packs argument *arg_name* of GL type
	*arg_type* at byte *offset*, emitting the byte-swapped variant when
	*is_swapped* is true."""
	if '*' in arg_type:
		# Pointer arguments travel as network pointers, not inline data.
		retval = "\tWRITE_NETWORK_POINTER( %d, (void *) %s );" % (offset, arg_name )
	elif is_swapped:
		if arg_type in ( "GLfloat", "GLclampf" ):
			# Reinterpret the float as GLuint so SWAPFLOAT can byte-swap it.
			retval = "\tWRITE_DATA( %d, GLuint, SWAPFLOAT(%s) );" % (offset, arg_name)
		elif arg_type in ( "GLdouble", "GLclampd" ):
			retval = "\tWRITE_SWAPPED_DOUBLE( %d, %s );" % (offset, arg_name)
		elif apiutil.sizeof(arg_type) == 1:
			# Single bytes need no swapping.
			retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
		elif apiutil.sizeof(arg_type) == 2:
			retval = "\tWRITE_DATA( %d, %s, SWAP16(%s) );" % (offset, arg_type, arg_name)
		elif apiutil.sizeof(arg_type) == 4:
			retval = "\tWRITE_DATA( %d, %s, SWAP32(%s) );" % (offset, arg_type, arg_name)
	elif arg_type in ( "GLdouble", "GLclampd" ):
		retval = "\tWRITE_DOUBLE( %d, %s );" % (offset, arg_name)
	else:
		retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
	return retval
def PrintFunction( func_name, extSuffix, num_coords, argtype,
do_swapped, do_count, do_vector ):
"""
Generate all the functions named crPackVertex[234][dfis][v]BBOX() and
crPackVertex[234][dfis][v]BBOX_COUNT().
We also handle glVertexAttrib*ARB.
Note: func_name should not have an ARB suffix.
"""
if do_count:
countSuffix = "_COUNT"
else:
countSuffix = ""
if do_swapped:
swapSuffix = "SWAP"
else:
swapSuffix = ""
if func_name[0:12] == "VertexAttrib":
isVertexAttrib = 1
else:
isVertexAttrib = 0
if argtype[0] == "N":
normalized = 1
else:
normalized = 0;
if argtype == "b" or argtype == "Nb":
vector_type = "GLbyte"
elif argtype == "ub" or argtype == "Nub":
vector_type = "GLubyte"
elif argtype == "s" or argtype == "Ns":
vector_type = "GLshort"
elif argtype == "us" or argtype == "Nus":
vector_type = "GLushort"
elif argtype == "i" or argtype == "Ni":
vector_type = "GLint"
elif argtype == "ui" or argtype == "Nui":
vector_type = "GLuint"
elif argtype == "f":
vector_type = "GLfloat"
elif argtype == "d":
vector_type = "GLdouble"
else:
print "type is %s" % argtype
abort()
if do_vector:
if isVertexAttrib:
func_name = 'VertexAttrib%d%sv' % (num_coords, argtype)
else:
func_name = 'Vertex%d%sv' % (num_coords,argtype)
params = apiutil.Parameters(func_name + extSuffix)
print 'void PACK_APIENTRY crPack%sBBOX%s%s( %s )' % (func_name + extSuffix, countSuffix,
swapSuffix, apiutil.MakeDeclarationString(params))
print '{'
if do_vector:
# vector version
packet_length = num_coords * apiutil.sizeof(vector_type)
if isVertexAttrib:
packet_length += 4 # for index
if packet_length % 4 != 0:
packet_length += 2
else:
# non-vector
packet_length = apiutil.PacketLength( params )
if isVertexAttrib:
packet_length += 0 # for index
if packet_length % 4 != 0:
packet_length += 2
print "\tGET_PACKER_CONTEXT(pc);"
print "\tunsigned char *data_ptr;"
if normalized:
if argtype == "Nb":
t = "B"
elif argtype == "Ni":
t = "I"
elif argtype == "Nui":
t = "UI"
elif argtype == "Nub":
t = "UB"
elif argtype == "Ns":
t = "S"
elif argtype == "Nus":
t = "US"
else:
abort()
if do_vector:
print "\tCREATE_%dD_VFLOATS_%s_NORMALIZED();" % (num_coords, t)
else:
print "\tCREATE_%dD_FLOATS_%s_NORMALIZED();" % (num_coords, t)
else:
if do_vector:
print "\tCREATE_%dD_VFLOATS();" % num_coords
else:
print "\tCREATE_%dD_FLOATS();" % num_coords
print "\tGET_BUFFERED%s_POINTER( pc, %d );" % (countSuffix, packet_length)
# Bounding box code
if isVertexAttrib:
print "\tif (pc->updateBBOX && index == 0)"
else:
print "\tif (pc->updateBBOX)"
print "\t{"
if num_coords < 4:
print "\t\tUPDATE_%dD_BBOX();" % num_coords
else:
print "\t\tUPDATE_3D_BBOX();"
print "\t}"
if isVertexAttrib:
print "\tif (index > 0) {"
t = argtype
print "\t\tpc->current.c.vertexAttrib.%s%d[index] = data_ptr + 4;" % (t, num_coords)
print "\t\tpc->current.attribsUsedMask |= (1 << index);"
if do_count:
print "\t\tpc->current.vtx_count--;"
print "\t}"
fname = func_name + extSuffix
if do_vector:
# use non-vector opcode
opcode = apiutil.OpcodeName( func_name[:-1] + extSuffix )
else:
opcode = apiutil.OpcodeName( func_name + extSuffix )
counter = 0
if do_vector:
if isVertexAttrib:
if do_swapped:
print "\tWRITE_DATA( 0, GLuint, SWAP32(index) );"
else:
print "\tWRITE_DATA( 0, GLuint, index );"
counter += 4
argname = params[1][0] # skip 'index' parameter
else:
argname = params[0][0]
for index in range(num_coords):
print WriteData( counter, vector_type, "%s[%d]" % (argname, index), do_swapped )
counter += apiutil.sizeof(vector_type)
if isVertexAttrib:
if do_vector == 2:
# this is a bit of a hack
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name + "ARB" )
else:
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name[:-1] + "ARB" )
else:
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name[:-1] )
else:
for index in range(0,len(params)):
(name, type, vecSize) = params[index]
print WriteData( counter, type, name, do_swapped )
counter += apiutil.sizeof(type)
if isVertexAttrib:
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name + "ARB" )
else:
print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name )
print '}\n'
#end PrintFunction()
# Emit packer functions for the fixed-function glVertex[234]{dfis} entry
# points: every combination of swapped/native byte order, counted/uncounted
# buffers and vector/scalar argument forms.
for num_coords in [2,3,4]:
	for argtype in ['d', 'f', 'i', 's']:
		func_name = 'Vertex%d%s' % (num_coords, argtype)
		for swap in range(0, 2):
			for count in range(0, 2):
				for vec in range(0, 2):
					PrintFunction( func_name, "", num_coords, argtype, swap,
								   count, vec )
# Same matrix of variants for the generic glVertexAttrib[1234]{dfs}ARB
# entry points (these additionally carry an attribute index argument).
for num_coords in [1,2,3,4]:
	for argtype in ['d', 'f', 's']:
		func_name = 'VertexAttrib%d%s' % (num_coords, argtype)
		for swap in range(0, 2):
			for count in range(0, 2):
				for vec in range(0, 2):
					PrintFunction( func_name, "ARB", num_coords, argtype, swap,
								   count, vec )
# Special vector functions: integer/normalized VertexAttrib4* variants that
# only exist in vector form (vec=2 selects the hacked vector code path).
moreFuncs = [ [ "VertexAttrib4ubv", "ub" ],
			  [ "VertexAttrib4usv", "us" ],
			  [ "VertexAttrib4uiv", "ui" ],
			  [ "VertexAttrib4bv", "b" ],
			  [ "VertexAttrib4iv", "i" ],
			  [ "VertexAttrib4Nbv", "Nb" ],
			  [ "VertexAttrib4Nsv", "Ns" ],
			  [ "VertexAttrib4Niv", "Ni" ],
			  [ "VertexAttrib4Nubv", "Nub" ],
			  [ "VertexAttrib4Nusv", "Nus" ],
			  [ "VertexAttrib4Nuiv", "Nui" ]
			]
for (func_name, argtype) in moreFuncs:
	vec = 2 # special, hacked value
	num_coords = 4
	for swap in range(0, 2):
		for count in range(0, 2):
			PrintFunction( func_name, "ARB", num_coords, argtype, swap, count, vec )
# Special non-vector functions
moreFuncs = [ [ "VertexAttrib4Nub", "Nub" ] ]
for (func_name, argtype) in moreFuncs:
	vec = 0
	num_coords = 4
	for swap in range(0, 2):
		for count in range(0, 2):
			PrintFunction( func_name, "ARB", num_coords, argtype, swap, count, vec )
| {
"content_hash": "7d53c1aa36420aaaf43ac4bcf88c9ca2",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 89,
"avg_line_length": 27.18471337579618,
"alnum_prop": 0.6197282099343955,
"repo_name": "boompig/chromium-stream-processing",
"id": "1c9c29ef63f9f82003559d6487e6eb29e31a9feb",
"size": "8536",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packer/packer_bbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "518"
},
{
"name": "C",
"bytes": "7744045"
},
{
"name": "C++",
"bytes": "13047"
},
{
"name": "CSS",
"bytes": "1802"
},
{
"name": "Groff",
"bytes": "93608"
},
{
"name": "HTML",
"bytes": "2178"
},
{
"name": "Makefile",
"bytes": "98396"
},
{
"name": "Perl",
"bytes": "7595"
},
{
"name": "Perl6",
"bytes": "27715"
},
{
"name": "Python",
"bytes": "727751"
}
],
"symlink_target": ""
} |
from flask import render_template, send_from_directory
from passgen import app, static_dir
@app.route('/assets/<path:path>')
def send_assets(path):
    """Serve a static asset from ``static_dir`` at /assets/<path>."""
    return send_from_directory(static_dir, path)
@app.route('/', methods=['GET'])
def index():
    """Render the application landing page."""
    return render_template('index.html')
| {
"content_hash": "ed47bfca5c1bc25e01297fdad24c2bac",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 54,
"avg_line_length": 22.23076923076923,
"alnum_prop": 0.7093425605536332,
"repo_name": "odedlaz/passgen",
"id": "170fa9b54eca8f724168c45d79d5e060b0c5ec8d",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passgen/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1846"
},
{
"name": "JavaScript",
"bytes": "11432"
},
{
"name": "Python",
"bytes": "745"
}
],
"symlink_target": ""
} |
import datetime
import os
import time
import torch
import torch.utils.data
from filelock import FileLock
from torch import nn
import torchvision
import ray
from ray.util.sgd.torch.examples.segmentation.coco_utils import get_coco
import ray.util.sgd.torch.examples.segmentation.transforms as T
import ray.util.sgd.torch.examples.segmentation.utils as utils
from ray.util.sgd.torch import TrainingOperator
from ray.util.sgd import TorchTrainer
# NVIDIA apex is an optional dependency; ``amp`` stays None when it is not
# installed, and the fp16 loss-scaling branch in train_batch is skipped.
try:
    from apex import amp
except ImportError:
    amp = None
def get_dataset(name,
                image_set,
                transform,
                num_classes_only=False,
                download="auto"):
    """Resolve a segmentation dataset by name.

    Returns a ``(dataset, num_classes)`` tuple; ``dataset`` is ``None`` when
    ``num_classes_only`` is set.  With ``download="auto"`` the download is
    skipped whenever the expected data directory already exists.
    """
    def sbd_segmentation(*args, **kwargs):
        # SBDataset needs an explicit mode to produce segmentation targets.
        return torchvision.datasets.SBDataset(
            *args, mode="segmentation", **kwargs)
    catalog = {
        "voc": (os.path.expanduser("~/datasets01/VOC/060817/"),
                torchvision.datasets.VOCSegmentation, 21),
        "voc_aug": (os.path.expanduser("~/datasets01/SBDD/072318/"),
                    sbd_segmentation, 21),
        "coco": (os.path.expanduser("~/datasets01/COCO/022719/"), get_coco, 21)
    }
    root, factory, num_classes = catalog[name]
    if num_classes_only:
        return None, num_classes
    if download == "auto" and os.path.exists(root):
        download = False
    try:
        ds = factory(
            root, download=download, image_set=image_set, transforms=transform)
    except RuntimeError:
        print("data loading failed. Retrying this.")
        # Retry once, forcing a (re-)download of the data.
        ds = factory(
            root, download=True, image_set=image_set, transforms=transform)
    return ds, num_classes
def get_transform(train):
    """Build the preprocessing pipeline.

    Training resizes randomly within [0.5, 2.0] x base size, then flips and
    crops; evaluation only resizes to the base size.  Both end with tensor
    conversion and ImageNet normalization.
    """
    base_size = 520
    crop_size = 480
    if train:
        min_size, max_size = int(0.5 * base_size), int(2.0 * base_size)
    else:
        min_size = max_size = base_size
    ops = [T.RandomResize(min_size, max_size)]
    if train:
        ops += [T.RandomHorizontalFlip(0.5), T.RandomCrop(crop_size)]
    ops += [
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return T.Compose(ops)
def criterion(inputs, target):
    """Cross-entropy loss over each model head; label 255 is ignored.

    ``inputs`` maps head names ("out" and, optionally, "aux") to logits.
    The auxiliary head, when present, is weighted by 0.5.
    """
    losses = {
        head: nn.functional.cross_entropy(logits, target, ignore_index=255)
        for head, logits in inputs.items()
    }
    if len(losses) == 1:
        return losses["out"]
    return losses["out"] + 0.5 * losses["aux"]
def get_optimizer(model, aux_loss, args=None):
    """Build the SGD optimizer for a segmentation model.

    Backbone and classifier parameters train at ``args.lr``; with
    ``aux_loss`` the auxiliary classifier gets a 10x learning rate.

    :param model: network exposing ``backbone`` and ``classifier`` (and,
        when ``aux_loss`` is set, ``aux_classifier``) submodules.
    :param aux_loss: include ``model.aux_classifier`` parameters.
    :param args: namespace providing ``lr``, ``momentum``, ``weight_decay``.
        Defaults to the module-level ``args`` for backward compatibility:
        the original implementation read the global implicitly, which fails
        in worker processes where the ``__main__`` guard never ran.  Callers
        (e.g. ``SegOperator.setup``) should pass it explicitly.
    """
    if args is None:
        # Legacy fallback to the CLI namespace created under
        # ``if __name__ == "__main__"``; raises KeyError when absent.
        args = globals()["args"]
    params_to_optimize = [
        {
            "params": [
                p for p in model.backbone.parameters() if p.requires_grad
            ]
        },
        {
            "params": [
                p for p in model.classifier.parameters() if p.requires_grad
            ]
        },
    ]
    if aux_loss:
        params = [
            p for p in model.aux_classifier.parameters() if p.requires_grad
        ]
        # The auxiliary head trains with a 10x learning rate.
        params_to_optimize.append({"params": params, "lr": args.lr * 10})
    optimizer = torch.optim.SGD(
        params_to_optimize,
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    return optimizer
class SegOperator(TrainingOperator):
    """RaySGD training operator for semantic segmentation models."""
    def setup(self, config):
        """Create data loaders, model and optimizer, then register them.

        ``config`` must contain ``args`` (the CLI namespace) and
        ``num_workers``; ``num_classes`` is written back into ``config``
        so :meth:`validate` can size its confusion matrix.
        """
        args = config["args"]
        # Create Data Loaders.
        with FileLock(".ray.lock"):
            # Within a machine, this code runs synchronously.
            dataset, num_classes = get_dataset(
                args.dataset, "train", get_transform(train=True))
            config["num_classes"] = num_classes
            dataset_test, _ = get_dataset(
                args.dataset, "val", get_transform(train=False))
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=args.batch_size,
            num_workers=args.data_workers,
            collate_fn=utils.collate_fn,
            drop_last=True)
        data_loader_test = torch.utils.data.DataLoader(
            dataset_test,
            batch_size=1,
            num_workers=args.data_workers,
            collate_fn=utils.collate_fn)
        # Create model.
        model = torchvision.models.segmentation.__dict__[args.model](
            num_classes=config["num_classes"],
            aux_loss=args.aux_loss,
            pretrained=args.pretrained)
        if config["num_workers"] > 1:
            # Multi-worker training: convert BatchNorm to SyncBatchNorm.
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        # Create optimizer.
        optimizer = get_optimizer(model, aux_loss=args.aux_loss)
        # Register components.
        self.model, self.optimizer = self.register(
            models=model,
            optimizers=optimizer,
            train_loader=data_loader,
            validation_loader=data_loader_test)
    def train_batch(self, batch, batch_info):
        """Run one optimization step on an ``(image, target)`` pair."""
        image, target = batch
        image, target = image.to(self.device), target.to(self.device)
        output = self.model(image)
        loss = criterion(output, target)
        self.optimizer.zero_grad()
        if self.use_fp16 and amp:
            # apex mixed precision: scale the loss before backward.
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        self.optimizer.step()
        lr = self.optimizer.param_groups[0]["lr"]
        # NOTE(review): ``batch`` is the (image, target) tuple, so
        # len(batch) is always 2; this probably meant the batch size
        # (e.g. image.size(0)) -- confirm before relying on "num_samples".
        return {"loss": loss.item(), "lr": lr, "num_samples": len(batch)}
    def validate(self, data_loader, info=None):
        """Accumulate a confusion matrix over ``data_loader`` (no grads)."""
        self.model.eval()
        confmat = utils.ConfusionMatrix(self.config["num_classes"])
        with torch.no_grad():
            for image, target in data_loader:
                image, target = image.to(self.device), target.to(self.device)
                output = self.model(image)
                output = output["out"]
                confmat.update(target.flatten(), output.argmax(1).flatten())
        confmat.reduce_from_all_processes()
        return confmat
def main(args):
    """Run distributed segmentation training, checkpointing each epoch.

    Creates a TorchTrainer around :class:`SegOperator`, trains for
    ``args.epochs`` epochs, prints the first worker's confusion matrix after
    every epoch and saves a checkpoint into ``args.output_dir``.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    print(args)
    started = time.time()
    trainer_config = {"args": args, "num_workers": args.num_workers}
    trainer = TorchTrainer(
        training_operator_cls=SegOperator,
        use_tqdm=True,
        use_fp16=True,
        num_workers=trainer_config["num_workers"],
        config=trainer_config,
        use_gpu=torch.cuda.is_available())
    for epoch in range(args.epochs):
        trainer.train()
        # Validation returns one confusion matrix per worker; show the first.
        print(trainer.validate(reduce_results=False)[0])
        checkpoint = trainer.state_dict()
        checkpoint.update(epoch=epoch, args=args)
        torch.save(checkpoint,
                   os.path.join(args.output_dir, f"model_{epoch}.pth"))
    elapsed = datetime.timedelta(seconds=int(time.time() - started))
    total_time_str = str(elapsed)
    print(f"Training time {total_time_str}")
def parse_args():
    """Build and evaluate the command-line parser for this script."""
    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch Segmentation Training with RaySGD")
    add = parser.add_argument
    add("--address",
        required=False,
        default=None,
        help="the address to use for connecting to a Ray cluster.")
    add("--dataset", default="voc", help="dataset")
    add("--model", default="fcn_resnet101", help="model")
    add("--aux-loss", action="store_true", help="auxiliar loss")
    add("--device", default="cuda", help="device")
    add("-b", "--batch-size", default=8, type=int)
    add("-n", "--num-workers", default=1, type=int, help="GPU parallelism")
    add("--epochs",
        default=30,
        type=int,
        metavar="N",
        help="number of total epochs to run")
    add("--data-workers",
        default=16,
        type=int,
        metavar="N",
        help="number of data loading workers (default: 16)")
    add("--lr", default=0.01, type=float, help="initial learning rate")
    add("--momentum", default=0.9, type=float, metavar="M", help="momentum")
    add("--wd",
        "--weight-decay",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
        dest="weight_decay")
    add("--output-dir", default=".", help="path where to save")
    add("--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true")
    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI options, connect to (or start) a Ray cluster, then train.
    args = parse_args()
    ray.init(address=args.address)
    main(args)
| {
"content_hash": "8c1b0e9dca6275639208f4b9e074d1cc",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 32.19548872180451,
"alnum_prop": 0.5959831854273704,
"repo_name": "richardliaw/ray",
"id": "afefdac305f4671cf2f0ca8e7fd658a31f47daee",
"size": "8564",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/ray/util/sgd/torch/examples/segmentation/train_segmentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
} |
"""Script for testing ganeti.hypervisor.hv_xen"""
import string # pylint: disable=W0402
import unittest
import tempfile
import shutil
import random
import os
import mock
from ganeti import constants
from ganeti import objects
from ganeti import pathutils
from ganeti import hypervisor
from ganeti import utils
from ganeti import errors
from ganeti import compat
from ganeti.hypervisor import hv_base
from ganeti.hypervisor import hv_xen
import testutils
# Map from hypervisor class to hypervisor name, inverted from the
# registry in the hypervisor package.
HVCLASS_TO_HVNAME = utils.InvertDict(hypervisor._HYPERVISOR_MAP)
class TestConsole(unittest.TestCase):
  """Tests for GetInstanceConsole on both PVM and HVM Xen hypervisors."""
  def test(self):
    hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
    for cls in [hv_xen.XenPvmHypervisor(), hv_xen.XenHvmHypervisor()]:
      instance = objects.Instance(name="xen.example.com",
                                  primary_node="node24828-uuid")
      node = objects.Node(name="node24828", uuid="node24828-uuid",
                          ndparams={})
      group = objects.NodeGroup(name="group52341", ndparams={})
      cons = cls.GetInstanceConsole(instance, node, group, hvparams, {})
      self.assertEqual(cons.Validate(), None)
      # Xen consoles are reached over SSH to the hosting node
      self.assertEqual(cons.kind, constants.CONS_SSH)
      self.assertEqual(cons.host, node.name)
      self.assertEqual(cons.command[-1], instance.name)
class TestCreateConfigCpus(unittest.TestCase):
  """Tests for hv_xen._CreateConfigCpus CPU-pinning config generation."""
  def testEmpty(self):
    for cpu_mask in [None, ""]:
      self.assertEqual(hv_xen._CreateConfigCpus(cpu_mask),
                       "cpus = [ ]")
  def testAll(self):
    # Pinning to "all" produces no config line at all
    self.assertEqual(hv_xen._CreateConfigCpus(constants.CPU_PINNING_ALL),
                     None)
  def testOne(self):
    self.assertEqual(hv_xen._CreateConfigCpus("9"), "cpu = \"9\"")
  def testMultiple(self):
    self.assertEqual(hv_xen._CreateConfigCpus("0-2,4,5-5:3:all"),
                     ("cpus = [ \"0,1,2,4,5\", \"3\", \"%s\" ]" %
                      constants.CPU_PINNING_ALL_XEN))
class TestGetCommand(testutils.GanetiTestCase):
  """Tests for XenHypervisor._GetCommand toolstack command resolution."""
  def testCommandExplicit(self):
    """Test the case when the command is given as class parameter explicitly.
    """
    expected_cmd = "xl"
    hv = hv_xen.XenHypervisor(_cmd=constants.XEN_CMD_XL)
    self.assertEqual(hv._GetCommand(None), expected_cmd)
  def testCommandInvalid(self):
    """Test the case an invalid command is given as class parameter explicitly.
    """
    hv = hv_xen.XenHypervisor(_cmd="invalidcommand")
    self.assertRaises(errors.ProgrammerError, hv._GetCommand, None)
  def testCommandHvparams(self):
    # Without an explicit _cmd, the command comes from hvparams
    expected_cmd = "xl"
    test_hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
    hv = hv_xen.XenHypervisor()
    self.assertEqual(hv._GetCommand(test_hvparams), expected_cmd)
  def testCommandHvparamsInvalid(self):
    test_hvparams = {}
    hv = hv_xen.XenHypervisor()
    self.assertRaises(errors.HypervisorError, hv._GetCommand, test_hvparams)
  def testCommandHvparamsCmdInvalid(self):
    test_hvparams = {constants.HV_XEN_CMD: "invalidcommand"}
    hv = hv_xen.XenHypervisor()
    self.assertRaises(errors.ProgrammerError, hv._GetCommand, test_hvparams)
class TestParseInstanceList(testutils.GanetiTestCase):
  """Tests for hv_xen._ParseInstanceList output parsing."""
  def test(self):
    data = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt")
    # Exclude node
    self.assertEqual(hv_xen._ParseInstanceList(data.splitlines(), False), [])
    # Include node
    result = hv_xen._ParseInstanceList(data.splitlines(), True)
    self.assertEqual(len(result), 1)
    self.assertEqual(len(result[0]), 6)
    # Name
    self.assertEqual(result[0][0], hv_xen._DOM0_NAME)
    # ID
    self.assertEqual(result[0][1], 0)
    # Memory
    self.assertEqual(result[0][2], 1023)
    # VCPUs
    self.assertEqual(result[0][3], 1)
    # State
    self.assertEqual(result[0][4], hv_base.HvInstanceState.RUNNING)
    # Time
    self.assertAlmostEqual(result[0][5], 121152.6)
  def testWrongLineFormat(self):
    # Malformed lines (too few fields, non-numeric ID) must be rejected
    tests = [
      ["three fields only"],
      ["name InvalidID 128 1 r----- 12345"],
      ]
    for lines in tests:
      try:
        hv_xen._ParseInstanceList(["Header would be here"] + lines, False)
      except errors.HypervisorError, err:
        self.assertTrue("Can't parse instance list" in str(err))
      else:
        self.fail("Exception was not raised")
class TestGetInstanceList(testutils.GanetiTestCase):
  """Tests for hv_xen._GetRunningInstanceList retry/timeout behavior."""
  def _Fail(self):
    # Simulated failing "xm list" invocation
    return utils.RunResult(constants.EXIT_FAILURE, None,
                           "stdout", "stderr", None,
                           NotImplemented, NotImplemented)
  def testTimeout(self):
    fn = testutils.CallCounter(self._Fail)
    try:
      hv_xen._GetRunningInstanceList(fn, False, _timeout=0.1)
    except errors.HypervisorError, err:
      self.assertTrue("timeout exceeded" in str(err))
    else:
      self.fail("Exception was not raised")
    self.assertTrue(fn.Count() < 10,
                    msg="'xm list' was called too many times")
  def _Success(self, stdout):
    # Simulated successful invocation returning the given stdout
    return utils.RunResult(constants.EXIT_SUCCESS, None, stdout, "", None,
                           NotImplemented, NotImplemented)
  def testSuccess(self):
    data = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt")
    fn = testutils.CallCounter(compat.partial(self._Success, data))
    result = hv_xen._GetRunningInstanceList(fn, True, _timeout=0.1)
    self.assertEqual(len(result), 4)
    self.assertEqual(map(compat.fst, result), [
      "Domain-0",
      "server01.example.com",
      "web3106215069.example.com",
      "testinstance.example.com",
      ])
    self.assertEqual(fn.Count(), 1)
class TestParseNodeInfo(testutils.GanetiTestCase):
  """Tests for hv_xen._ParseNodeInfo ("xm info" output parsing)."""
  def testEmpty(self):
    self.assertEqual(hv_xen._ParseNodeInfo(""), {})
  def testUnknownInput(self):
    # Unrecognized keys are silently ignored
    data = "\n".join([
      "foo bar",
      "something else goes",
      "here",
      ])
    self.assertEqual(hv_xen._ParseNodeInfo(data), {})
  def testBasicInfo(self):
    data = testutils.ReadTestData("xen-xm-info-4.0.1.txt")
    result = hv_xen._ParseNodeInfo(data)
    self.assertEqual(result, {
      "cpu_nodes": 1,
      "cpu_sockets": 2,
      "cpu_total": 4,
      "hv_version": (4, 0),
      "memory_free": 8004,
      "memory_total": 16378,
      })
class TestMergeInstanceInfo(testutils.GanetiTestCase):
  """Tests for merging parsed node info with the instance list."""
  def testEmpty(self):
    self.assertEqual(hv_xen._MergeInstanceInfo({}, []), {})
  def _FakeXmList(self, include_node):
    # Minimal fake "xm list" result: Dom0 plus one instance
    return [
      (hv_xen._DOM0_NAME, NotImplemented, 4096, 7, NotImplemented,
       NotImplemented),
      ("inst1.example.com", NotImplemented, 2048, 4, NotImplemented,
       NotImplemented),
      ]
  def testMissingNodeInfo(self):
    instance_list = self._FakeXmList(True)
    result = hv_xen._MergeInstanceInfo({}, instance_list)
    self.assertEqual(result, {
      "memory_dom0": 4096,
      "cpu_dom0": 7,
      })
  def testWithNodeInfo(self):
    # NOTE(review): this test calls _GetNodeInfo rather than
    # _MergeInstanceInfo; presumably _GetNodeInfo wraps the merge -- confirm.
    info = testutils.ReadTestData("xen-xm-info-4.0.1.txt")
    instance_list = self._FakeXmList(True)
    result = hv_xen._GetNodeInfo(info, instance_list)
    self.assertEqual(result, {
      "cpu_nodes": 1,
      "cpu_sockets": 2,
      "cpu_total": 4,
      "cpu_dom0": 7,
      "hv_version": (4, 0),
      "memory_dom0": 4096,
      "memory_free": 8004,
      "memory_hv": 2230,
      "memory_total": 16378,
      })
class TestGetConfigFileDiskData(unittest.TestCase):
  """Tests for hv_xen._GetConfigFileDiskData disk specification lines."""
  def testLetterCount(self):
    self.assertEqual(len(hv_xen._DISK_LETTERS), 26)
  def testNoDisks(self):
    self.assertEqual(hv_xen._GetConfigFileDiskData([], "hd"), [])
  def testManyDisks(self):
    # More disks than available device letters must raise an error
    for offset in [0, 1, 10]:
      disks = [(objects.Disk(dev_type=constants.DT_PLAIN),
                "/tmp/disk/%s" % idx,
                NotImplemented)
               for idx in range(len(hv_xen._DISK_LETTERS) + offset)]
      if offset == 0:
        result = hv_xen._GetConfigFileDiskData(disks, "hd")
        self.assertEqual(result, [
          "'phy:/tmp/disk/%s,hd%s,r'" % (idx, string.ascii_lowercase[idx])
          for idx in range(len(hv_xen._DISK_LETTERS) + offset)
          ])
      else:
        try:
          hv_xen._GetConfigFileDiskData(disks, "hd")
        except errors.HypervisorError, err:
          self.assertEqual(str(err), "Too many disks")
        else:
          self.fail("Exception was not raised")
  def testTwoLvDisksWithMode(self):
    disks = [
      (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDWR),
       "/tmp/diskFirst",
       NotImplemented),
      (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDONLY),
       "/tmp/diskLast",
       NotImplemented),
      ]
    result = hv_xen._GetConfigFileDiskData(disks, "hd")
    self.assertEqual(result, [
      "'phy:/tmp/diskFirst,hda,w'",
      "'phy:/tmp/diskLast,hdb,r'",
      ])
  def testFileDisks(self):
    # File-backed disks map their driver (loop/blktap/blktap2) to a prefix
    disks = [
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR,
                    logical_id=[constants.FD_LOOP]),
       "/tmp/diskFirst",
       NotImplemented),
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDONLY,
                    logical_id=[constants.FD_BLKTAP]),
       "/tmp/diskTwo",
       NotImplemented),
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR,
                    logical_id=[constants.FD_LOOP]),
       "/tmp/diskThree",
       NotImplemented),
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDONLY,
                    logical_id=[constants.FD_BLKTAP2]),
       "/tmp/diskFour",
       NotImplemented),
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR,
                    logical_id=[constants.FD_BLKTAP]),
       "/tmp/diskLast",
       NotImplemented),
      ]
    result = hv_xen._GetConfigFileDiskData(disks, "sd")
    self.assertEqual(result, [
      "'file:/tmp/diskFirst,sda,w'",
      "'tap:aio:/tmp/diskTwo,sdb,r'",
      "'file:/tmp/diskThree,sdc,w'",
      "'tap2:tapdisk:aio:/tmp/diskFour,sdd,r'",
      "'tap:aio:/tmp/diskLast,sde,w'",
      ])
  def testInvalidFileDisk(self):
    disks = [
      (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR,
                    logical_id=["#unknown#"]),
       "/tmp/diskinvalid",
       NotImplemented),
      ]
    self.assertRaises(KeyError, hv_xen._GetConfigFileDiskData, disks, "sd")
class TestXenHypervisorRunXen(unittest.TestCase):
  """Tests for XenHypervisor._RunXen command construction."""
  XEN_SUB_CMD = "help"
  def testCommandUnknown(self):
    cmd = "#unknown command#"
    self.assertFalse(cmd in constants.KNOWN_XEN_COMMANDS)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=NotImplemented,
                              _cmd=cmd)
    self.assertRaises(errors.ProgrammerError, hv._RunXen, [], None)
  def testCommandNoHvparams(self):
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=NotImplemented)
    hvparams = None
    self.assertRaises(errors.HypervisorError, hv._RunXen, [self.XEN_SUB_CMD],
                      hvparams)
  def testCommandFromHvparams(self):
    expected_xen_cmd = "xl"
    hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
    mock_run_cmd = mock.Mock()
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv._RunXen([self.XEN_SUB_CMD], hvparams=hvparams)
    mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_SUB_CMD])
class TestXenHypervisorGetInstanceList(unittest.TestCase):
  """Tests for XenHypervisor._GetInstanceList hvparams handling."""
  RESULT_OK = utils.RunResult(0, None, "", "", "", None, None)
  XEN_LIST = "list"
  def testNoHvparams(self):
    # NOTE(review): expected_xen_cmd is unused here; the call is expected
    # to raise before any command is run.
    expected_xen_cmd = "xm"
    mock_run_cmd = mock.Mock(return_value=self.RESULT_OK)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    self.assertRaises(errors.HypervisorError, hv._GetInstanceList, True, None)
  def testFromHvparams(self):
    expected_xen_cmd = "xl"
    hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
    mock_run_cmd = mock.Mock(return_value=self.RESULT_OK)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv._GetInstanceList(True, hvparams)
    mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_LIST])
class TestXenHypervisorListInstances(unittest.TestCase):
  """Tests for XenHypervisor.ListInstances hvparams handling."""
  RESULT_OK = utils.RunResult(0, None, "", "", "", None, None)
  XEN_LIST = "list"
  def testNoHvparams(self):
    # NOTE(review): expected_xen_cmd is unused; the call should raise first.
    expected_xen_cmd = "xm"
    mock_run_cmd = mock.Mock(return_value=self.RESULT_OK)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    self.assertRaises(errors.HypervisorError, hv.ListInstances)
  def testHvparamsXl(self):
    expected_xen_cmd = "xl"
    hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
    mock_run_cmd = mock.Mock(return_value=self.RESULT_OK)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv.ListInstances(hvparams=hvparams)
    mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_LIST])
class TestXenHypervisorCheckToolstack(unittest.TestCase):
  """Tests for toolstack (xm/xl) binary and configuration checks."""
  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
    self.cfg_name = "xen_config"
    self.cfg_path = utils.PathJoin(self.tmpdir, self.cfg_name)
    self.hv = hv_xen.XenHypervisor()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def testBinaryNotFound(self):
    RESULT_FAILED = utils.RunResult(1, None, "", "", "", None, None)
    mock_run_cmd = mock.Mock(return_value=RESULT_FAILED)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    result = hv._CheckToolstackBinary("xl")
    self.assertFalse(result)
  def testCheckToolstackXlConfigured(self):
    RESULT_OK = utils.RunResult(0, None, "", "", "", None, None)
    mock_run_cmd = mock.Mock(return_value=RESULT_OK)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    result = hv._CheckToolstackXlConfigured()
    self.assertTrue(result)
  def testCheckToolstackXlNotConfigured(self):
    # A "different toolstack" error means xl exists but is not selected
    RESULT_FAILED = utils.RunResult(
        1, None, "",
        "ERROR: A different toolstack (xm) has been selected!",
        "", None, None)
    mock_run_cmd = mock.Mock(return_value=RESULT_FAILED)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    result = hv._CheckToolstackXlConfigured()
    self.assertFalse(result)
  def testCheckToolstackXlFails(self):
    # Any other error output must be propagated as HypervisorError
    RESULT_FAILED = utils.RunResult(
        1, None, "",
        "ERROR: The pink bunny hid the binary.",
        "", None, None)
    mock_run_cmd = mock.Mock(return_value=RESULT_FAILED)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    self.assertRaises(errors.HypervisorError, hv._CheckToolstackXlConfigured)
class TestXenHypervisorWriteConfigFile(unittest.TestCase):
  """Tests for XenHypervisor._WriteConfigFile error handling."""
  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def testWriteError(self):
    # Writing into a non-existent config directory must fail cleanly
    cfgdir = utils.PathJoin(self.tmpdir, "foobar")
    hv = hv_xen.XenHypervisor(_cfgdir=cfgdir,
                              _run_cmd_fn=NotImplemented,
                              _cmd=NotImplemented)
    self.assertFalse(os.path.exists(cfgdir))
    try:
      hv._WriteConfigFile("name", "data")
    except errors.HypervisorError, err:
      self.assertTrue(str(err).startswith("Cannot write Xen instance"))
    else:
      self.fail("Exception was not raised")
class TestXenHypervisorVerify(unittest.TestCase):
  """Tests for XenHypervisor.Verify result reporting."""
  def setUp(self):
    output = testutils.ReadTestData("xen-xm-info-4.0.1.txt")
    self._result_ok = utils.RunResult(0, None, output, "", "", None, None)
  def testVerify(self):
    hvparams = {constants.HV_XEN_CMD : constants.XEN_CMD_XL}
    mock_run_cmd = mock.Mock(return_value=self._result_ok)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv._CheckToolstack = mock.Mock(return_value=True)
    result = hv.Verify(hvparams)
    # None means verification passed
    self.assertTrue(result is None)
  def testVerifyToolstackNotOk(self):
    hvparams = {constants.HV_XEN_CMD : constants.XEN_CMD_XL}
    mock_run_cmd = mock.Mock(return_value=self._result_ok)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv._CheckToolstack = mock.Mock()
    hv._CheckToolstack.side_effect = errors.HypervisorError("foo")
    result = hv.Verify(hvparams)
    self.assertTrue(result is not None)
  def testVerifyFailing(self):
    result_failed = utils.RunResult(1, None, "", "", "", None, None)
    mock_run_cmd = mock.Mock(return_value=result_failed)
    hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented,
                              _run_cmd_fn=mock_run_cmd)
    hv._CheckToolstack = mock.Mock(return_value=True)
    result = hv.Verify()
    self.assertTrue(result is not None)
class _TestXenHypervisor(object):
TARGET = NotImplemented
CMD = NotImplemented
HVNAME = NotImplemented
VALID_HVPARAMS = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
def setUp(self):
super(_TestXenHypervisor, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.vncpw = "".join(random.sample(string.ascii_letters, 10))
self.vncpw_path = utils.PathJoin(self.tmpdir, "vncpw")
utils.WriteFile(self.vncpw_path, data=self.vncpw)
def tearDown(self):
super(_TestXenHypervisor, self).tearDown()
shutil.rmtree(self.tmpdir)
def _GetHv(self, run_cmd=NotImplemented):
return self.TARGET(_cfgdir=self.tmpdir, _run_cmd_fn=run_cmd, _cmd=self.CMD)
def _SuccessCommand(self, stdout, cmd):
self.assertEqual(cmd[0], self.CMD)
return utils.RunResult(constants.EXIT_SUCCESS, None, stdout, "", None,
NotImplemented, NotImplemented)
def _FailingCommand(self, cmd):
self.assertEqual(cmd[0], self.CMD)
return utils.RunResult(constants.EXIT_FAILURE, None,
"", "This command failed", None,
NotImplemented, NotImplemented)
def _FakeTcpPing(self, expected, result, target, port, **kwargs):
self.assertEqual((target, port), expected)
return result
def testReadingNonExistentConfigFile(self):
hv = self._GetHv()
try:
hv._ReadConfigFile("inst15780.example.com")
except errors.HypervisorError, err:
self.assertTrue(str(err).startswith("Failed to load Xen config file:"))
else:
self.fail("Exception was not raised")
def testRemovingAutoConfigFile(self):
name = "inst8206.example.com"
cfgfile = utils.PathJoin(self.tmpdir, name)
autodir = utils.PathJoin(self.tmpdir, "auto")
autocfgfile = utils.PathJoin(autodir, name)
os.mkdir(autodir)
utils.WriteFile(autocfgfile, data="")
hv = self._GetHv()
self.assertTrue(os.path.isfile(autocfgfile))
hv._WriteConfigFile(name, "content")
self.assertFalse(os.path.exists(autocfgfile))
self.assertEqual(utils.ReadFile(cfgfile), "content")
def _XenList(self, cmd):
self.assertEqual(cmd, [self.CMD, "list"])
# TODO: Use actual data from "xl" command
output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt")
return self._SuccessCommand(output, cmd)
def testGetInstanceInfo(self):
hv = self._GetHv(run_cmd=self._XenList)
(name, instid, memory, vcpus, state, runtime) = \
hv.GetInstanceInfo("server01.example.com")
self.assertEqual(name, "server01.example.com")
self.assertEqual(instid, 1)
self.assertEqual(memory, 1024)
self.assertEqual(vcpus, 1)
self.assertEqual(state, hv_base.HvInstanceState.RUNNING)
self.assertAlmostEqual(runtime, 167643.2)
def testGetInstanceInfoDom0(self):
hv = self._GetHv(run_cmd=self._XenList)
# TODO: Not sure if this is actually used anywhere (can't find it), but the
# code supports querying for Dom0
(name, instid, memory, vcpus, state, runtime) = \
hv.GetInstanceInfo(hv_xen._DOM0_NAME)
self.assertEqual(name, "Domain-0")
self.assertEqual(instid, 0)
self.assertEqual(memory, 1023)
self.assertEqual(vcpus, 1)
self.assertEqual(state, hv_base.HvInstanceState.RUNNING)
self.assertAlmostEqual(runtime, 154706.1)
def testGetInstanceInfoUnknown(self):
hv = self._GetHv(run_cmd=self._XenList)
result = hv.GetInstanceInfo("unknown.example.com")
self.assertTrue(result is None)
def testGetAllInstancesInfo(self):
hv = self._GetHv(run_cmd=self._XenList)
result = hv.GetAllInstancesInfo()
self.assertEqual(map(compat.fst, result), [
"server01.example.com",
"web3106215069.example.com",
"testinstance.example.com",
])
def testListInstances(self):
hv = self._GetHv(run_cmd=self._XenList)
self.assertEqual(hv.ListInstances(), [
"server01.example.com",
"web3106215069.example.com",
"testinstance.example.com",
])
def _StartInstanceCommand(self, inst, paused, failcreate, cmd):
if cmd == [self.CMD, "info"]:
output = testutils.ReadTestData("xen-xm-info-4.0.1.txt")
elif cmd == [self.CMD, "list"]:
output = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt")
elif cmd[:2] == [self.CMD, "create"]:
args = cmd[2:]
cfgfile = utils.PathJoin(self.tmpdir, inst.name)
if paused:
self.assertEqual(args, ["-p", cfgfile])
else:
self.assertEqual(args, [cfgfile])
if failcreate:
return self._FailingCommand(cmd)
output = ""
else:
self.fail("Unhandled command: %s" % (cmd, ))
return self._SuccessCommand(output, cmd)
def _MakeInstance(self):
# Copy default parameters
bep = objects.FillDict(constants.BEC_DEFAULTS, {})
hvp = objects.FillDict(constants.HVC_DEFAULTS[self.HVNAME], {})
# Override default VNC password file path
if constants.HV_VNC_PASSWORD_FILE in hvp:
hvp[constants.HV_VNC_PASSWORD_FILE] = self.vncpw_path
disks = [
(objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDWR),
utils.PathJoin(self.tmpdir, "disk0"),
NotImplemented),
(objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDONLY),
utils.PathJoin(self.tmpdir, "disk1"),
NotImplemented),
]
inst = objects.Instance(name="server01.example.com",
hvparams=hvp, beparams=bep,
osparams={}, nics=[], os="deb1",
disks=map(compat.fst, disks))
inst.UpgradeConfig()
return (inst, disks)
def testStartInstance(self):
(inst, disks) = self._MakeInstance()
pathutils.LOG_XEN_DIR = self.tmpdir
for failcreate in [False, True]:
for paused in [False, True]:
run_cmd = compat.partial(self._StartInstanceCommand,
inst, paused, failcreate)
hv = self._GetHv(run_cmd=run_cmd)
# Ensure instance is not listed
self.assertTrue(inst.name not in hv.ListInstances())
# Remove configuration
cfgfile = utils.PathJoin(self.tmpdir, inst.name)
utils.RemoveFile(cfgfile)
if failcreate:
self.assertRaises(errors.HypervisorError, hv.StartInstance,
inst, disks, paused)
# Check whether a stale config file is left behind
self.assertFalse(os.path.exists(cfgfile))
else:
hv.StartInstance(inst, disks, paused)
# Check if configuration was updated
lines = utils.ReadFile(cfgfile).splitlines()
if constants.HV_VNC_PASSWORD_FILE in inst.hvparams:
self.assertTrue(("vncpasswd = '%s'" % self.vncpw) in lines)
else:
extra = inst.hvparams[constants.HV_KERNEL_ARGS]
self.assertTrue(("extra = '%s'" % extra) in lines)
def _StopInstanceCommand(self, instance_name, force, fail, full_cmd):
# Remove the timeout (and its number of seconds) if it's there
if full_cmd[:1][0] == "timeout":
cmd = full_cmd[2:]
else:
cmd = full_cmd
# Test the actual command
if (cmd == [self.CMD, "list"]):
output = "Name ID Mem VCPUs State Time(s)\n" \
"Domain-0 0 1023 1 r----- 142691.0\n" \
"%s 417 128 1 r----- 3.2\n" % instance_name
elif cmd[:2] == [self.CMD, "destroy"]:
self.assertEqual(cmd[2:], [instance_name])
output = ""
elif not force and cmd[:3] == [self.CMD, "shutdown", "-w"]:
self.assertEqual(cmd[3:], [instance_name])
output = ""
else:
self.fail("Unhandled command: %s" % (cmd, ))
if fail:
# Simulate a failing command
return self._FailingCommand(cmd)
else:
return self._SuccessCommand(output, cmd)
def testStopInstance(self):
name = "inst4284.example.com"
cfgfile = utils.PathJoin(self.tmpdir, name)
cfgdata = "config file content\n"
for force in [False, True]:
for fail in [False, True]:
utils.WriteFile(cfgfile, data=cfgdata)
run_cmd = compat.partial(self._StopInstanceCommand, name, force, fail)
hv = self._GetHv(run_cmd=run_cmd)
self.assertTrue(os.path.isfile(cfgfile))
if fail:
try:
hv._StopInstance(name, force, None,
constants.DEFAULT_SHUTDOWN_TIMEOUT)
except errors.HypervisorError, err:
self.assertTrue(str(err).startswith("listing instances failed"),
msg=str(err))
else:
self.fail("Exception was not raised")
self.assertEqual(utils.ReadFile(cfgfile), cfgdata,
msg=("Configuration was removed when stopping"
" instance failed"))
else:
hv._StopInstance(name, force, None,
constants.DEFAULT_SHUTDOWN_TIMEOUT)
self.assertFalse(os.path.exists(cfgfile))
def _MigrateNonRunningInstCmd(self, cmd):
if cmd == [self.CMD, "list"]:
output = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt")
else:
self.fail("Unhandled command: %s" % (cmd, ))
return self._SuccessCommand(output, cmd)
def testMigrateInstanceNotRunning(self):
name = "nonexistinginstance.example.com"
target = constants.IP4_ADDRESS_LOCALHOST
port = 14618
hv = self._GetHv(run_cmd=self._MigrateNonRunningInstCmd)
for live in [False, True]:
try:
hv._MigrateInstance(NotImplemented, name, target, port, live,
self.VALID_HVPARAMS, _ping_fn=NotImplemented)
except errors.HypervisorError, err:
self.assertEqual(str(err), "Instance not running, cannot migrate")
else:
self.fail("Exception was not raised")
def _MigrateInstTargetUnreachCmd(self, cmd):
if cmd == [self.CMD, "list"]:
output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt")
else:
self.fail("Unhandled command: %s" % (cmd, ))
return self._SuccessCommand(output, cmd)
def testMigrateTargetUnreachable(self):
name = "server01.example.com"
target = constants.IP4_ADDRESS_LOCALHOST
port = 28349
hv = self._GetHv(run_cmd=self._MigrateInstTargetUnreachCmd)
hvparams = {constants.HV_XEN_CMD: self.CMD}
for live in [False, True]:
if self.CMD == constants.XEN_CMD_XL:
# TODO: Detect unreachable targets
pass
else:
try:
hv._MigrateInstance(NotImplemented, name, target, port, live,
hvparams,
_ping_fn=compat.partial(self._FakeTcpPing,
(target, port), False))
except errors.HypervisorError, err:
wanted = "Remote host %s not" % target
self.assertTrue(str(err).startswith(wanted))
else:
self.fail("Exception was not raised")
  def _MigrateInstanceCmd(self, cluster_name, instance_name, target, port,
                          live, fail, cmd):
    """Fake run_cmd validating the exact migration command line.

    Checks that the "migrate" arguments match what the configured Xen
    command (xm or xl) is expected to produce, and simulates a failing
    command when `fail` is set.
    """
    if cmd == [self.CMD, "list"]:
      output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt")
    elif cmd[:2] == [self.CMD, "migrate"]:
      if self.CMD == constants.XEN_CMD_XM:
        # xm: destination port, plus "-l" for live migration
        args = ["-p", str(port)]

        if live:
          args.append("-l")

      elif self.CMD == constants.XEN_CMD_XL:
        # xl: ssh transport command and the instance's config file
        args = [
          "-s", constants.XL_SSH_CMD % cluster_name,
          "-C", utils.PathJoin(self.tmpdir, instance_name),
          ]

      else:
        self.fail("Unknown Xen command '%s'" % self.CMD)

      args.extend([instance_name, target])
      self.assertEqual(cmd[2:], args)

      if fail:
        return self._FailingCommand(cmd)

      output = ""
    else:
      self.fail("Unhandled command: %s" % (cmd, ))

    return self._SuccessCommand(output, cmd)
def testMigrateInstance(self):
clustername = "cluster.example.com"
instname = "server01.example.com"
target = constants.IP4_ADDRESS_LOCALHOST
port = 22364
hvparams = {constants.HV_XEN_CMD: self.CMD}
for live in [False, True]:
for fail in [False, True]:
ping_fn = \
testutils.CallCounter(compat.partial(self._FakeTcpPing,
(target, port), True))
run_cmd = \
compat.partial(self._MigrateInstanceCmd,
clustername, instname, target, port, live,
fail)
hv = self._GetHv(run_cmd=run_cmd)
if fail:
try:
hv._MigrateInstance(clustername, instname, target, port, live,
hvparams, _ping_fn=ping_fn)
except errors.HypervisorError, err:
self.assertTrue(str(err).startswith("Failed to migrate instance"))
else:
self.fail("Exception was not raised")
else:
hv._MigrateInstance(clustername, instname, target, port, live,
hvparams, _ping_fn=ping_fn)
if self.CMD == constants.XEN_CMD_XM:
expected_pings = 1
else:
expected_pings = 0
self.assertEqual(ping_fn.Count(), expected_pings)
def _GetNodeInfoCmd(self, fail, cmd):
if cmd == [self.CMD, "info"]:
if fail:
return self._FailingCommand(cmd)
else:
output = testutils.ReadTestData("xen-xm-info-4.0.1.txt")
elif cmd == [self.CMD, "list"]:
if fail:
self.fail("'xm list' shouldn't be called when 'xm info' failed")
else:
output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt")
else:
self.fail("Unhandled command: %s" % (cmd, ))
return self._SuccessCommand(output, cmd)
def testGetNodeInfo(self):
run_cmd = compat.partial(self._GetNodeInfoCmd, False)
hv = self._GetHv(run_cmd=run_cmd)
result = hv.GetNodeInfo()
self.assertEqual(result["hv_version"], (4, 0))
self.assertEqual(result["memory_free"], 8004)
def testGetNodeInfoFailing(self):
run_cmd = compat.partial(self._GetNodeInfoCmd, True)
hv = self._GetHv(run_cmd=run_cmd)
self.assertTrue(hv.GetNodeInfo() is None)
class TestXenVersionsSafeForMigration(unittest.TestCase):
  """Tests for XenHypervisor.VersionsSafeForMigration."""
  def testHVVersionsLikelySafeForMigration(self):
    hv = hv_xen.XenHypervisor()
    # (source, target, expected) triples: upgrading within one minor version
    # is safe, downgrades and larger jumps are not; the third version
    # component is ignored in both directions.
    cases = [
      ([4, 0], [4, 1], True),
      ([4, 1], [4, 0], False),
      ([4, 0], [4, 2], False),
      ([4, 2, 7], [4, 2, 9], True),
      ([4, 2, 9], [4, 2, 7], True),
      ([4], [4], True),
      ([4], [5], False),
      ]
    for (src, target, expected) in cases:
      self.assertEqual(hv.VersionsSafeForMigration(src, target), expected)
def _MakeTestClass(cls, cmd):
  """Makes a class for testing.

  The returned class combines _TestXenHypervisor with unittest.TestCase
  and pins the hypervisor class, the Xen command and the hypervisor name
  as class attributes.

  @type cls: class
  @param cls: Hypervisor class to be tested
  @type cmd: string
  @param cmd: Hypervisor command
  @rtype: tuple
  @return: Class name and class object (not instance)

  """
  class_name = "Test%sCmd%s" % (cls.__name__, cmd.title())
  members = dict(TARGET=cls, CMD=cmd, HVNAME=HVCLASS_TO_HVNAME[cls])
  test_class = type(class_name, (_TestXenHypervisor, unittest.TestCase),
                    members)
  return (class_name, test_class)
# Create test classes programmatically instead of manually to reduce the risk
# of forgetting some combinations
for cls in [hv_xen.XenPvmHypervisor, hv_xen.XenHvmHypervisor]:
  for cmd in constants.KNOWN_XEN_COMMANDS:
    (name, testcls) = _MakeTestClass(cls, cmd)
    # Guard against two combinations producing the same class name
    assert name not in locals()
    # At module level locals() is the module namespace, so the generated
    # class becomes discoverable by the unittest loader
    locals()[name] = testcls

if __name__ == "__main__":
  testutils.GanetiTestProgram()
| {
"content_hash": "1761bb01de9879cda89acb3d8c217548",
"timestamp": "",
"source": "github",
"line_count": 993,
"max_line_length": 79,
"avg_line_length": 33.09969788519638,
"alnum_prop": 0.6314956796884508,
"repo_name": "apyrgio/ganeti",
"id": "73b40cbf7135cea71c457d43be1e6c2191eb0f1d",
"size": "34232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/py/ganeti.hypervisor.hv_xen_unittest.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "63"
},
{
"name": "Haskell",
"bytes": "2332665"
},
{
"name": "Python",
"bytes": "5772348"
},
{
"name": "Shell",
"bytes": "103874"
}
],
"symlink_target": ""
} |
"""
I/O for DOLFIN's XML format, cf.
<https://people.sc.fsu.edu/~jburkardt/data/dolfin_xml/dolfin_xml.html>.
"""
import os
import pathlib
import re
from xml.etree import ElementTree as ET
import numpy as np
from .._common import warn
from .._exceptions import ReadError, WriteError
from .._helpers import register_format
from .._mesh import Mesh
def _read_mesh(filename):
    """Stream-parse a DOLFIN XML mesh file.

    Returns ``(points, cells, cell_type)``, where ``cells`` is a one-element
    list ``[(cell_type, connectivity_array)]``.
    """
    dolfin_to_meshio_type = {"triangle": ("triangle", 3), "tetrahedron": ("tetra", 4)}

    # Use iterparse() to avoid loading the entire file via parse(). iterparse()
    # allows to discard elements (via clear()) after they have been processed.
    # See <https://stackoverflow.com/a/326541/353337>.
    dim = None
    points = None
    keys = None
    cell_type = None
    num_nodes_per_cell = None
    cells = None
    cell_tags = None
    # Only "start" events are handled: every attribute this parser needs is
    # available as soon as the element opens.
    for event, elem in ET.iterparse(filename, events=("start", "end")):
        if event == "end":
            continue

        if elem.tag == "dolfin":
            # Don't be too strict with the assertion. Some mesh files don't have the
            # proper tags.
            # assert elem.attrib['nsmap'] \
            #     == '{\'dolfin\': \'https://fenicsproject.org/\'}'
            pass
        elif elem.tag == "mesh":
            dim = int(elem.attrib["dim"])
            cell_type, num_nodes_per_cell = dolfin_to_meshio_type[
                elem.attrib["celltype"]
            ]
            # Attribute names of the per-cell connectivity entries: v0, v1, ...
            cell_tags = [f"v{i}" for i in range(num_nodes_per_cell)]
        elif elem.tag == "vertices":
            if dim is None:
                raise ReadError("Expected `mesh` before `vertices`")
            points = np.empty((int(elem.attrib["size"]), dim))
            keys = ["x", "y"]
            if dim == 3:
                keys += ["z"]
        elif elem.tag == "vertex":
            if points is None or keys is None:
                raise ReadError("Expected `vertices` before `vertex`")
            k = int(elem.attrib["index"])
            points[k] = [elem.attrib[key] for key in keys]
        elif elem.tag == "cells":
            if cell_type is None or num_nodes_per_cell is None:
                raise ReadError("Expected `mesh` before `cells`")
            cells = [
                (
                    cell_type,
                    np.empty((int(elem.attrib["size"]), num_nodes_per_cell), dtype=int),
                )
            ]
        elif elem.tag in ["triangle", "tetrahedron"]:
            k = int(elem.attrib["index"])
            assert cells is not None
            assert cell_tags is not None
            cells[0][1][k] = [elem.attrib[t] for t in cell_tags]
        else:
            warn(f"Unknown entry {elem.tag}. Ignoring.")

        # Free memory of the processed element right away
        elem.clear()

    return points, cells, cell_type
def _read_cell_data(filename):
dolfin_type_to_numpy_type = {
"int": np.dtype("int"),
"float": np.dtype("float"),
"uint": np.dtype("uint"),
}
cell_data = {}
dir_name = pathlib.Path(filename).resolve().parent
# Loop over all files in the same directory as `filename`.
basename = pathlib.Path(filename).stem
for f in os.listdir(dir_name):
# Check if there are files by the name "<filename>_*.xml"; if yes,
# extract the * pattern and make it the name of the data set.
out = re.match(f"{basename}_([^\\.]+)\\.xml", f)
if not out:
continue
name = out.group(1)
parser = ET.XMLParser()
tree = ET.parse((dir_name / f).as_posix(), parser)
root = tree.getroot()
mesh_functions = list(root)
if len(mesh_functions) != 1:
raise ReadError("Can only handle one mesh function")
mesh_function = mesh_functions[0]
if mesh_function.tag != "mesh_function":
raise ReadError()
size = int(mesh_function.attrib["size"])
dtype = dolfin_type_to_numpy_type[mesh_function.attrib["type"]]
data = np.empty(size, dtype=dtype)
for child in mesh_function:
if child.tag != "entity":
raise ReadError()
idx = int(child.attrib["index"])
data[idx] = child.attrib["value"]
if name not in cell_data:
cell_data[name] = []
cell_data[name].append(data)
return cell_data
def read(filename):
    """Read a DOLFIN XML mesh plus any sibling cell-data files as a Mesh."""
    points, cells, _ = _read_mesh(filename)
    return Mesh(points, cells, cell_data=_read_cell_data(filename))
def _write_mesh(filename, points, cell_type, cells):
stripped_cells = [c for c in cells if c.type == cell_type]
meshio_to_dolfin_type = {"triangle": "triangle", "tetra": "tetrahedron"}
if any(c.type != cell_type for c in cells):
discarded_cell_types = {c.type for c in cells if c.type != cell_type}
warn(
"DOLFIN XML can only handle one cell type at a time. "
+ f"Using {cell_type}, discarding {', '.join(discarded_cell_types)}.",
)
dim = points.shape[1]
if dim not in [2, 3]:
raise WriteError(f"Can only write dimension 2, 3, got {dim}.")
coord_names = ["x", "y"]
if dim == 3:
coord_names += ["z"]
with open(filename, "w") as f:
f.write("<dolfin nsmap=\"{'dolfin': 'https://fenicsproject.org/'}\">\n")
ct = meshio_to_dolfin_type[cell_type]
f.write(f' <mesh celltype="{ct}" dim="{dim}">\n')
num_points = len(points)
f.write(f' <vertices size="{num_points}">\n')
for idx, point in enumerate(points):
s = " ".join(f'{xyz}="{p}"' for xyz, p in zip("xyz", point))
f.write(f' <vertex index="{idx}" {s} />\n')
f.write(" </vertices>\n")
num_cells = 0
for c in stripped_cells:
num_cells += len(c.data)
f.write(f' <cells size="{num_cells}">\n')
idx = 0
for cell_block in stripped_cells:
type_string = meshio_to_dolfin_type[cell_block.type]
for cell in cell_block.data:
s = " ".join(f'v{k}="{c}"' for k, c in enumerate(cell))
f.write(f' <{type_string} index="{idx}" {s} />\n')
idx += 1
f.write(" </cells>\n")
f.write(" </mesh>\n")
f.write("</dolfin>")
def _numpy_type_to_dolfin_type(dtype):
types = {
"int": [np.int8, np.int16, np.int32, np.int64],
"uint": [np.uint8, np.uint16, np.uint32, np.uint64],
"float": [np.float16, np.float32, np.float64],
}
for key, numpy_types in types.items():
for numpy_type in numpy_types:
if np.issubdtype(dtype, numpy_type):
return key
raise WriteError("Could not convert NumPy data type to DOLFIN data type.")
def _write_cell_data(filename, dim, cell_data):
    """Write one cell-data array as a DOLFIN XML <mesh_function> file."""
    # Pass `nsmap` as a plain string. The previous code passed a dict, which
    # is an lxml-only API; the standard-library ElementTree only relied on an
    # undocumented str() fallback during serialization. The string below is
    # exactly what that fallback produced, so the output is unchanged.
    dolfin = ET.Element(
        "dolfin", nsmap="{'dolfin': 'https://fenicsproject.org/'}"
    )
    mesh_function = ET.SubElement(
        dolfin,
        "mesh_function",
        type=_numpy_type_to_dolfin_type(cell_data.dtype),
        dim=str(dim),
        size=str(len(cell_data)),
    )

    for k, value in enumerate(cell_data):
        ET.SubElement(mesh_function, "entity", index=str(k), value=repr(value))

    tree = ET.ElementTree(dolfin)
    tree.write(filename)
def write(filename, mesh):
    """Write `mesh` in DOLFIN XML format, plus one file per cell-data set.

    Raises WriteError if the mesh contains neither triangles nor tetrahedra.
    """
    warn("DOLFIN XML is a legacy format. Consider using XDMF instead.")

    if any("tetra" == c.type for c in mesh.cells):
        cell_type = "tetra"
    elif any("triangle" == c.type for c in mesh.cells):
        cell_type = "triangle"
    else:
        raise WriteError(
            "DOLFIN XML only supports triangles and tetrahedra. "
            "Consider using XDMF instead."
        )

    _write_mesh(filename, mesh.points, cell_type, mesh.cells)

    # Loop-invariant values, hoisted out of the per-data-set loops: the base
    # filename and the topological dimension (2 iff the points are planar)
    # do not depend on the individual data set.
    fname = os.path.splitext(filename)[0]
    dim = 2 if mesh.points.shape[1] == 2 or all(mesh.points[:, 2] == 0) else 3
    for name, lst in mesh.cell_data.items():
        cell_data_filename = f"{fname}_{name}.xml"
        for data in lst:
            _write_cell_data(cell_data_filename, dim, np.array(data))
# Register the reader/writer under the "dolfin-xml" format name for ".xml".
register_format("dolfin-xml", [".xml"], read, {"dolfin-xml": write})
| {
"content_hash": "73604766621ec7af4e0f12ea62ee72f4",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 88,
"avg_line_length": 34.05882352941177,
"alnum_prop": 0.5546508758943992,
"repo_name": "nschloe/meshio",
"id": "b9af204760c39553f52ac8ace32ea9f5d529f07d",
"size": "8106",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/meshio/dolfin/_dolfin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "124"
},
{
"name": "Python",
"bytes": "639630"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.logz import MicrosoftLogz
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-logz
# USAGE
python monitors_list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Authenticate via the environment-provided AAD service principal and
    # target the given subscription.
    credential = DefaultAzureCredential()
    client = MicrosoftLogz(
        credential=credential,
        subscription_id="00000000-0000-0000-0000-000000000000",
    )

    # Page through every Logz monitor in the subscription.
    for item in client.monitors.list_by_subscription():
        print(item)
# x-ms-original-file: specification/logz/resource-manager/Microsoft.Logz/stable/2020-10-01/examples/Monitors_List.json
# Run the sample only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"content_hash": "241f04b793fb560184010c221de98ef2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 118,
"avg_line_length": 32.096774193548384,
"alnum_prop": 0.7286432160804021,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c349af7b3659e7ec59a057772845821557d2ad4c",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/logz/azure-mgmt-logz/generated_samples/monitors_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import unittest
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
def make_mock_crash_report_darwin(process_name, pid):
    """Return a fake Darwin crash-report string for the given process and pid.

    The template mirrors the layout of a real Mac OS X CrashReporter log so
    that CrashLogs' process-name/pid matching and parsing can be exercised.
    """
    return """Process: {process_name} [{pid}]
Path: /Volumes/Data/slave/x-release-tests/build/WebKitBuild/Release/{process_name}
Identifier: {process_name}
Version: ??? (???)
Code Type: X86-64 (Native)
Parent Process: Python [2578]
Date/Time: 2011-12-07 13:27:34.816 -0800
OS Version: Mac OS X 10.6.8 (10K549)
Report Version: 6
Interval Since Last Report: 1660 sec
Crashes Since Last Report: 1
Per-App Crashes Since Last Report: 1
Anonymous UUID: 507D4EEB-9D70-4E2E-B322-2D2F0ABFEDC0
Exception Type: EXC_BREAKPOINT (SIGTRAP)
Exception Codes: 0x0000000000000002, 0x0000000000000000
Crashed Thread: 0
Dyld Error Message:
Library not loaded: /Volumes/Data/WebKit-BuildSlave/x-release/build/WebKitBuild/Release/WebCore.framework/Versions/A/WebCore
Referenced from: /Volumes/Data/slave/x-release/build/WebKitBuild/Release/WebKit.framework/Versions/A/WebKit
Reason: image not found
Binary Images:
0x7fff5fc00000 - 0x7fff5fc3be0f dyld 132.1 (???) <29DECB19-0193-2575-D838-CF743F0400B2> /usr/lib/dyld
System Profile:
Model: Xserve3,1, BootROM XS31.0081.B04, 8 processors, Quad-Core Intel Xeon, 2.26 GHz, 6 GB, SMC 1.43f4
Graphics: NVIDIA GeForce GT 120, NVIDIA GeForce GT 120, PCIe, 256 MB
Memory Module: global_name
Network Service: Ethernet 2, Ethernet, en1
PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
Serial ATA Device: OPTIARC DVD RW AD-5670S
""".format(process_name=process_name, pid=pid)
class CrashLogsTest(unittest.TestCase):
    """Tests for CrashLogs' discovery of Darwin crash reports."""

    def test_find_log_darwin(self):
        """Exercises find_newest_log() against a mocked DiagnosticReports dir."""
        # The production code path is Mac-specific; silently skip elsewhere.
        if not SystemHost().platform.is_mac():
            return

        older_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28528)
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28530)
        newer_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28529)
        other_process_mock_crash_report = make_mock_crash_report_darwin('FooProcess', 28527)
        # Header stripped so the pid cannot be parsed from the report
        misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + \
            make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:]
        # Keys encode the report timestamps; "newest" is decided by mtime
        files = {
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash': older_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash': mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash': newer_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash': None,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash':
                other_process_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash':
                misformatted_mock_crash_report,
        }
        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))

        # Without a pid the newest report for the process name wins
        log = crash_logs.find_newest_log("DumpRenderTree")
        self.assertMultiLineEqual(log, newer_mock_crash_report)
        # With a pid, only reports for that pid are considered
        log = crash_logs.find_newest_log("DumpRenderTree", 28529)
        self.assertMultiLineEqual(log, newer_mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28530)
        self.assertMultiLineEqual(log, mock_crash_report)
        # Unknown pid or too-recent cutoff yields no log
        log = crash_logs.find_newest_log("DumpRenderTree", 28531)
        self.assertIsNone(log)
        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
        self.assertIsNone(log)

        def bad_read(path):
            raise IOError('IOError: No such file or directory')

        def bad_mtime(path):
            raise OSError('OSError: No such file or directory')

        # Filesystem errors are reported in the log when include_errors is set
        filesystem.read_text_file = bad_read
        log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
        self.assertIn('IOError: No such file or directory', log)

        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
        filesystem.mtime = bad_mtime
        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
        self.assertIn('OSError: No such file or directory', log)
| {
"content_hash": "f1275f109dbe1c6bb9690c097af64bbd",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 129,
"avg_line_length": 48.74226804123711,
"alnum_prop": 0.7057952622673435,
"repo_name": "Samsung/ChromiumGStreamerBackend",
"id": "9822340a46e4eef2ba395d05a5ab4d164c8f1b6e",
"size": "6049",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import platform
import subprocess
def clear_screen():
    """Clear the terminal screen in a cross-platform way.

    On Windows "cls" is a cmd.exe builtin, not a standalone executable, so
    it must be run through the shell; calling it directly raises
    FileNotFoundError. Elsewhere the "clear" binary is invoked directly.
    """
    if platform.system() == 'Windows':
        subprocess.call('cls', shell=True)
    else:
        subprocess.call('clear')
| {
"content_hash": "9575de309792901a7d46700a7e49e7ec",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 21,
"alnum_prop": 0.6369047619047619,
"repo_name": "inglesp/prescons",
"id": "a6a8611ca2e7e91a20f686f6ed190377b8e0da23",
"size": "168",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/prescons/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8216"
}
],
"symlink_target": ""
} |
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import socket
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _create_simple_recurrent_keras_model(input_shape):
  """Create a simple tf.keras model containing a recurrent layer for testing."""
  model = models.Sequential()
  # Zero initializers keep the model's outputs deterministic across runs.
  model.add(recurrent_v2.LSTM(
      10,
      input_shape=input_shape,
      kernel_initializer="zeros",
      recurrent_initializer="zeros"))
  model.add(core.Dense(1, kernel_initializer="zeros"))
  model.compile(loss="mse", optimizer="sgd")
  return model
_host_name = socket.gethostname()
_current_file_full_path = os.path.abspath(__file__)
class TracingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
  def setUp(self):
    """Creates a fresh dump-root directory for each test case."""
    super(TracingCallbackTest, self).setUp()
    self.dump_root = tempfile.mkdtemp()
  def tearDown(self):
    """Removes the dump root and disables the dumping callback."""
    if os.path.isdir(self.dump_root):
      shutil.rmtree(self.dump_root, ignore_errors=True)
    # Disable the process-wide callback so state does not leak across tests.
    dumping_callback.disable_dump_debug_info()
    super(TracingCallbackTest, self).tearDown()
def _verifyStackFrames(self, stack_frames):
"""Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
"""
self.assertTrue([
frame for frame in stack_frames if frame[0] == _current_file_full_path])
def _expectedDefaultDeviceName(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
return "/job:localhost/replica:0/task:0" + gpu_name
else:
return "/job:localhost/replica:0/task:0/device:CPU:0"
def testInvalidTensorDebugModeCausesError(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
r"Valid options.*NO_TENSOR.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NONSENSICAL")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FulHealth", "FULL_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
def testEnableDumpDebugInfoLogsTensorDebugModeAsStringName(self,
tensor_debug_mode):
log_messages = []
def fake_logging_info(*args):
log_messages.append(args)
with test.mock.patch.object(
tf_logging, "info", side_effect=fake_logging_info):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
self.assertLen(log_messages, 1)
self.assertIn(self.dump_root, log_messages[0])
self.assertIn(tensor_debug_mode, log_messages[0])
  def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
    """disable_dump_debug_info() must be a no-op when dumping is not enabled."""
    dumping_callback.disable_dump_debug_info()
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullHealth", "FULL_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testPureEagerOpExecution(self, tensor_debug_mode):
    """Test dumping data from eager op execution: float32."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    x = constant_op.constant(10.0)
    zero = constant_op.constant(0.0)
    one = constant_op.constant(1.0)
    two = constant_op.constant(2.0)
    three = constant_op.constant(3.0)
    # Use Collatz conjecture as a test case.
    # Starting at 10 the sequence is 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, which
    # determines the exact op sequence asserted at the end of this test.
    while x > one:
      if math_ops.equal(x % two, zero):
        x = x / two
      else:
        x = x * three + one

    writer.FlushNonExecutionFiles()
    self._readAndCheckMetadataFile()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      # Before FlushExecutionFiles() is called, the .execution file should be
      # empty.
      self.assertFalse(reader.executions())

      # After the flushing, the .execution file should hold the appropriate
      # contents.
      writer.FlushExecutionFiles()
      reader.update()
      executions = reader.executions()
      # Wall times must be monotonically non-decreasing across executions.
      prev_wall_time = 1
      executed_op_types = []
      tensor_values = collections.defaultdict(lambda: [])
      for execution in executions:
        self.assertGreaterEqual(execution.wall_time, prev_wall_time)
        prev_wall_time = execution.wall_time
        executed_op_types.append(execution.op_type)
        # Check the device name.
        if execution.op_type in ("AddV2", "Mul", "RealDiv"):
          self.assertLen(execution.output_tensor_device_ids, 1)
          self.assertEqual(
              reader.device_name_by_id(execution.output_tensor_device_ids[0]),
              self._expectedDefaultDeviceName(),
              "Unexpected device name from eager op %s" % execution.op_type)

        # No graph IDs should have been logged for eager op executions.
        self.assertFalse(execution.graph_id)
        self.assertTrue(execution.input_tensor_ids)
        self.assertTrue(execution.output_tensor_ids)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "NO_TENSOR":
          # Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
          # be empty.
          self.assertFalse(execution.debug_tensor_values)
        elif tensor_debug_mode == "CURT_HEALTH":
          self.assertLen(execution.debug_tensor_values, 1)
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: 0 means there is no inf or nan.
            self.assertAllClose(execution.debug_tensor_values, [[-1.0, 0.0]])
        elif tensor_debug_mode == "CONCISE_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: each scalar tensor has 1 element.
            # Remaining elements: no -inf, inf or nan in these
            self.assertAllClose(
                execution.debug_tensor_values, [[-1, 1, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # Elements: [
            #   -1 is the unset tensor_id for eager op execution,
            #   device ID (set to -1 for now),
            #   dtype, rank, element_count,
            #   neg_inf_count, pos_inf_count, nan_count
            #   neg_finite_count, zero_count, pos_finite_count]
            self.assertAllClose(
                execution.debug_tensor_values,
                [[-1, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]])
        elif tensor_debug_mode == "SHAPE":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: dtype enum value (float32).
            # 3rd element: rank (scalar).
            # 4th element: element count (4).
            # Remaining elements: shape at fixed length (6).
            self.assertAllClose(execution.debug_tensor_values,
                                [[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_TENSOR":
          tensor_values[execution.op_type].append(
              reader.execution_to_tensor_values(execution)[0])

        host_name, stack_frames = reader.read_execution_stack_trace(execution)
        self.assertEqual(host_name, _host_name)
        self._verifyStackFrames(stack_frames)

      if tensor_debug_mode == "FULL_TENSOR":
        self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
        self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
        self.assertAllClose(tensor_values["Mul"], [15])
        self.assertAllClose(tensor_values["AddV2"], [16])

      # Each loop iteration runs Greater, FloorMod and Equal, followed by
      # either RealDiv (even) or Mul + AddV2 (odd); a final Greater ends the
      # loop once x reaches 1.
      self.assertEqual(
          executed_op_types,
          [
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 10 --> 5
              "Greater",
              "FloorMod",
              "Equal",
              "Mul",
              "AddV2",  # 5 --> 16
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 16 --> 8
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 8 --> 4
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 4 --> 2
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 2 --> 1
              "Greater"
          ])

      # Due to the pure eager op execution, the .graph file and the
      # .graph_execution_traces file ought to be empty.
      self.assertFalse(reader.outermost_graphs())
      self.assertEqual(reader.num_graph_execution_traces(), 0)
  @parameterized.named_parameters(
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("FullHealth", "FULL_HEALTH"),
      ("Shape", "SHAPE"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
    """The summarizing debug modes correctly flag a tensor with inf/nan."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def func(x, y):
      return (x + y) / (x - y)

    x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
    y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
    # x - y = [-5, 0, 0, 0, 0, 0, 0, -1]
    # (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
    self.evaluate(func(x, y))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces]
      self.assertCountEqual(
          executed_op_types,
          ["Placeholder", "Placeholder", "AddV2", "Sub", "RealDiv"])
      if tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: indicates if there is any inf or nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            # Only the RealDiv op produced inf/nan values (see above).
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1])
          else:
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: element count (8).
          # Remaining 3 elements: The counts of -inf, inf and nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            # RealDiv output has 1 -inf, 3 inf and 2 nan (see expected
            # values in the comment above).
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 1, 3, 2])
          else:
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 0, 0, 0])
      elif tensor_debug_mode == "FULL_HEALTH":
        for trace in graph_exec_traces:
          # Elements: [
          #   tensor ID of the symbolic tensor (>= 0 for graph traces),
          #   device ID (set to -1 for now),
          #   dtype, rank, element_count,
          #   neg_inf_count, pos_inf_count, nan_count,
          #   neg_finite_count, zero_count, pos_finite_count]
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, -1, 19, 1, 8, 1, 3, 2, 1, 0, 1])
          elif trace.op_type == "Sub":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, -1, 19, 1, 8, 0, 0, 0, 2, 6, 0])
      else:  # SHAPE.
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: dtype enum value (float16 = 19).
          # 3rd element: rank (1)
          # 4th element: element count (8).
          # Remaining elements: shape at fixed length (6).
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          self.assertAllClose(trace.debug_tensor_value,
                              [tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
@parameterized.named_parameters(
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testBooleanTensors(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return math_ops.logical_not(math_ops.logical_and(x, y))
x = np.array([[False, False], [True, True]], dtype=np.bool)
y = np.array([[False, True], [False, True]], dtype=np.bool)
self.assertAllEqual(
self.evaluate(func(x, y)), [[True, True], [True, False]])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
self.assertEqual(
executed_op_types,
["Placeholder", "Placeholder", "LogicalAnd", "LogicalNot"])
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (bool).
# 3rd element: rank (2).
# 4th element: element count (4).
# Remaining elements: shape at fixed length.
self.assertAllClose(
trace.debug_tensor_value, [tensor_id, 10, 2, 4, 2, 2, 0, 0, 0, 0])
def testListingSourceFiles(self):
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
# Run a simple eager execution event, so that the source files are dumped.
self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
source_file_list = reader.source_file_list()
self.assertIsInstance(source_file_list, tuple)
for item in source_file_list:
self.assertIsInstance(item, tuple)
self.assertLen(item, 2)
self.assertIn((_host_name, _current_file_full_path), source_file_list)
def testReadingSourceLines(self):
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
# Run a simple eager execution event, so that the source-file contents are
# dumped.
self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
with open(_current_file_full_path, "rt") as f:
file_lines = f.read().split("\n")
self.assertEqual(
reader.source_lines(_host_name, _current_file_full_path), file_lines)
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("FullHealth", "FULL_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
    """Dumping covers a tf.function called from inside another tf.function."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)

    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))

    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      outermost_graphs = reader.outermost_graphs()
      self.assertLen(outermost_graphs, 1)

      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, so doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        self.assertIn("sin1p_log_sum", executions[0].op_type)
        # Get the executed graph and verify its identity and inner graph.
        graph = reader.graph_by_id(executions[0].graph_id)
        self.assertEqual(graph.name, "sin1p_log_sum")
        self.assertLen(graph.inner_graph_ids, 1)
        inner_graph = reader.graph_by_id(graph.inner_graph_ids[0])
        self.assertEqual(inner_graph.name, "log_sum")
        # Check device names.
        self.assertLen(executions[0].output_tensor_device_ids, 1)
        self.assertEqual(
            reader.device_name_by_id(executions[0].output_tensor_device_ids[0]),
            self._expectedDefaultDeviceName())
        self.assertIn(self._expectedDefaultDeviceName(),
                      set(reader.device_name_map().values()))

      # Verify the recorded graph-building history.
      placeholder_op_digests = reader.graph_op_digests(op_type="Placeholder")
      add_op_digests = reader.graph_op_digests(op_type="AddV2")
      self.assertLen(add_op_digests, 2)
      self.assertEqual(
          reader.graph_by_id(add_op_digests[0].graph_id).name, "log_sum")
      self.assertEqual(
          reader.graph_by_id(add_op_digests[1].graph_id).name, "sin1p_log_sum")
      log_op_digests = reader.graph_op_digests(op_type="Log")
      self.assertLen(log_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(log_op_digests[0].graph_id).name, "log_sum")
      sin_op_digests = reader.graph_op_digests(op_type="Sin")
      self.assertLen(sin_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(sin_op_digests[0].graph_id).name, "sin1p_log_sum")

      # Verify the output tensor IDs and the stack traces.
      for op_digest in add_op_digests + log_op_digests + sin_op_digests:
        # These are all single-output ops.
        self.assertLen(op_digest.output_tensor_ids, 1)
        self.assertGreaterEqual(op_digest.output_tensor_ids[0], 0)
        _, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
        self._verifyStackFrames(stack_frames)

      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [digest.op_type for digest in graph_exec_traces]
      self.assertEqual(
          executed_op_types,
          ["Placeholder", "Placeholder", "Placeholder", "Placeholder",
           "AddV2", "Log", "AddV2", "Sin"])
      placeholder_traces = graph_exec_traces[:4]
      non_placeholder_traces = graph_exec_traces[4:]

      # Verify the graph ID stack of each op.
      # The outer function's 1st Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[0].graph_ids[-1]).name,
          "sin1p_log_sum")
      # The outer function's 2nd Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[1].graph_ids[-1]).name,
          "sin1p_log_sum")
      # The inner function's 1st Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[2].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[2].graph_ids[-2]).name,
          "sin1p_log_sum")
      # The inner function's 2nd Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[3].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[3].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 1st AddV2 op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[0].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[0].graph_ids[-2]).name,
          "sin1p_log_sum")
      # Log op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[1].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[1].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 2nd AddV2 op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[2].graph_ids[-1]).name,
          "sin1p_log_sum")
      # Sin op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[3].graph_ids[-1]).name,
          "sin1p_log_sum")

      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        for trace in graph_exec_traces:
          self.assertIsNone(trace.debug_tensor_value)
      elif tensor_debug_mode == "CURT_HEALTH":
        # Test the association between graph exec and prior graph building.
        # In each case, the 1st element of debug_tensor_value is the ID of the
        # symbolic tensor and the 2nd element is a zero indicating there is no
        # inf or nan.
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 1st AddV2 op.
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # Log op.
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd AddV2 op.
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # Sin op.
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 0.0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        # 1st element: tensor_id.
        # 2nd element: element count. Remaining elements: all zero because there
        # is no -inf, inf or nan.
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0], 1., 0., 0., 0.])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
      elif tensor_debug_mode == "FULL_HEALTH":
        # Elements: [
        #   tensor ID of the symbolic tensor,
        #   device ID (set to -1 for now),
        #   dtype, rank, element_count,
        #   neg_inf_count, pos_inf_count, nan_count,
        #   neg_finite_count, zero_count, pos_finite_count]
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
      elif tensor_debug_mode == "SHAPE":
        # 1st element: tensor_id.
        # 2nd element: dtype (float32).
        # 3rd element: rank (scalar).
        # 4th element: element count (1).
        # Remaining elements: shape padded to fixed length (6).
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
      else:  # FULL_TENSOR.
        placeholder_full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in placeholder_traces]
        self.assertAllClose(placeholder_full_tensor_values[0], x)  # Input x.
        self.assertAllClose(placeholder_full_tensor_values[1], y)  # Input y.
        self.assertAllClose(placeholder_full_tensor_values[2], x)  # Input x.
        self.assertAllClose(placeholder_full_tensor_values[3], y)  # Input y.
        non_placeholder_full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in non_placeholder_traces]
        self.assertAllClose(
            non_placeholder_full_tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[2],
            np.log(5.0) + 1.0)  # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[3],
            np.sin(np.log(5.0) + 1.0))  # Sin op.
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
x_float32 = np.array(3.5, dtype=np.float32)
x_float64 = np.array(4.5, dtype=np.float64)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
    """Two FuncGraphs compiled from Python functions with identical names."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    class TestClass(object):

      @def_function.function
      def ceil_times_two(self, x):
        return math_ops.ceil(x) * 2.0

    # The `ceil_times_two` method of the two objects will be compiled
    # into separate FuncGraphs.
    test_object_1 = TestClass()
    test_object_2 = TestClass()

    x = np.array(3.5, dtype=np.float32)
    # Four executions, with two different FuncGraphs, which should lead
    # to two unique executed graph IDs (see assertion below).
    self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
    self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      executions = reader.executions()
      self.assertLen(executions, 4)
      for execution in executions:
        self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
      executed_graph_ids = [execution.graph_id for execution in executions]
      # Executions 0 & 2 came from object 1's FuncGraph; 1 & 3 from object 2's.
      self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
      self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
      self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
      self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
      # Despite being distinct graphs, both carry the method's name.
      for executed_graph_id in executed_graph_ids:
        self.assertEqual(
            reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  @parameterized.named_parameters(
      ("AddV2", "AddV2"),
      ("Log", "Log"),
      ("AddV2AndLog", "(AddV2|Log)"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testOpRegex(self, op_regex):
    """op_regex restricts which ops get graph-execution traces dumped."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        op_regex=op_regex)

    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)

    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))

    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(
        self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      # The regex filter applies only to execution traces; all ops are still
      # present in the graph-building history.
      self.assertIn("AddV2", op_types)
      self.assertIn("Log", op_types)
      self.assertIn("Sin", op_types)

      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      if op_regex == "AddV2":
        self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            tensor_values[1], np.log(5.0) + 1.0)  # 2nd AddV2 op.
      elif op_regex == "Log":
        self.assertEqual(executed_op_types, ["Log"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], np.log(5.0))  # Log op.
      else:  # "(AddV2|Log)"
        self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
  @parameterized.named_parameters(
      ("float", [dtypes.float32], None),
      ("float_only_sum", ["float32"], "Sum"),
      ("float_no_sum", (dtypes.float32,), "(?!Sum)"),
      ("int", [dtypes.int32], None),
      ("int_via_lambda", lambda dtype: dtype.is_integer, None),
      ("exclude_Sum", None, "(?!Sum)"),
      ("All", None, None),
  )
  @test_util.run_in_graph_and_eager_modes
  def testTensorDTypesAndOpRegexFilters(self,
                                        tensor_dtypes,
                                        op_regex):
    """tensor_dtypes and op_regex filters combine to limit dumped traces."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        tensor_dtypes=tensor_dtypes,
        op_regex=op_regex)

    @def_function.function
    def unique_sum(xs):
      """Sum over the unique values, for testing."""
      unique_xs, indices = array_ops.unique(xs)
      return math_ops.reduce_sum(unique_xs), indices

    xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
    y, indices = self.evaluate(unique_sum(xs))
    self.assertAllClose(y, 17.)
    self.assertAllEqual(indices, [0, 1, 2, 3, 0])
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      # Placeholder traces are excluded; only Unique and Sum are relevant to
      # the filter assertions below.
      executed_op_types = [digest.op_type for digest in graph_exec_digests
                           if digest.op_type != "Placeholder"]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests
                       if digest.op_type != "Placeholder"]
      # NOTE: Unique emits two output tensors (float32 values and int32
      # indices); a trace is dumped per output that passes the dtype filter,
      # which is why "Unique" can appear once or twice below.
      if tensor_dtypes == [dtypes.float32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique", "Sum"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllClose(tensor_values[1], 17.)  # Sum.
      elif tensor_dtypes == ["float32"] and op_regex == "Sum":
        self.assertEqual(executed_op_types, ["Sum"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], 17.)  # Sum.
      elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
      elif tensor_dtypes == [dtypes.int32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif callable(tensor_dtypes) and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif not tensor_dtypes and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique", "Unique"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
      else:  # "All".
        self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
        self.assertAllClose(tensor_values[2], 17)  # Sum.
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def iterative_doubling(x, times):
i = constant_op.constant(0, dtype=dtypes.int32)
while i < times:
x = x * 2.0
i += 1
return x
x = constant_op.constant(0.5, dtype=dtypes.float32)
times = constant_op.constant(4, dtype=dtypes.int32)
self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_op_digests = reader.graph_op_digests()
op_types = [digest.op_type for digest in graph_op_digests]
self.assertIn("Less", op_types)
self.assertIn("Mul", op_types)
self.assertIn("AddV2", op_types)
# Before FlushExecutionFiles() is called, the .execution and
# .graph_execution_traces files should be both empty.
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
# TODO(cais): Backport execution instrumentation to tf.Session.
writer.FlushExecutionFiles()
# After the flushing, the .execution file should hold the appropriate
# contents.
reader.update()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
executions = reader.executions()
self.assertLen(executions, 1)
executed_op_types = [execution.op_type for execution in executions]
self.assertIn("iterative_doubling", executions[0].op_type)
execution = executions[0]
self.assertLen(execution.input_tensor_ids, 2)
self.assertLen(execution.output_tensor_ids, 1)
self.assertEqual(
debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
tensor_debug_mode)
if tensor_debug_mode == "FULL_TENSOR":
tensor_values = reader.execution_to_tensor_values(execution)
self.assertAllClose(tensor_values, [8.0])
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
if tensor_debug_mode != "CURT_HEALTH":
# Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
# The Less op should have been executed 5 times.
self.assertEqual(executed_op_types.count("Less"), 5)
# The last executed op should be Less.
self.assertEqual(executed_op_types[-1], "Less")
# AddV2 produces an int tensor, which is not tracked under CURT_HEALTH.
# The AddV2 op should have been run, but we refrain from asserting on
# how many times it's executed.
self.assertIn("AddV2", executed_op_types)
for trace in graph_exec_traces:
self.assertEqual(trace.output_slot, 0)
# The Mul op should have been executed 4 times.
self.assertEqual(executed_op_types.count("Mul"), 4)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor_id; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0.0])
elif tensor_debug_mode == "FULL_TENSOR":
less_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Less"]
self.assertAllEqual(less_values, [True, True, True, True, False])
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
  def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
    """A second enable call with a new dump root redirects all dumping."""
    dumping_callback.enable_dump_debug_info(self.dump_root)
    new_dump_root = self.dump_root + "_new_dump_root"
    writer = dumping_callback.enable_dump_debug_info(new_dump_root)

    x = constant_op.constant([10.0, 12.0, 10.0])
    for _ in range(2):
      array_ops.unique(x)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    # All executions should have been recorded under the new dump root.
    with debug_events_reader.DebugDataReader(new_dump_root) as reader:
      reader.update()
      executions = reader.executions()
      self.assertLen(executions, 2)
      for execution in executions:
        self.assertGreater(execution.wall_time, 0)
        self.assertEqual(execution.op_type, "Unique")
        self.assertEqual(execution.num_outputs, 2)
        _, stack_frames = reader.read_execution_stack_trace(execution)
        self._verifyStackFrames(stack_frames)

    with debug_events_reader.DebugDataReader(
        self.dump_root) as old_dump_root_reader:
      old_dump_root_reader.update()
      # The old dump root shouldn't have been written to.
      self.assertEqual(old_dump_root_reader.num_executions(), 0)
      self.assertFalse(old_dump_root_reader.outermost_graphs())
  def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
    """Assert calling enable_dump_debug_info() with two tensor-debug modes.

    It should lead to overwriting of the previously-configured mode.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def add_1_divide_by_2(x):
      return (x + 1.0) / 2.0

    self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      for tensor_value in tensor_values:
        # Under NO_TENSOR mode, each tensor is summarized as an empty float32
        # array.
        self.assertAllEqual(tensor_value, [])

    # Re-enabling with a conflicting tensor-debug mode is rejected.
    with self.assertRaisesRegexp(
        ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
  """After disable_dump_debug_info(), nothing should be dumped."""
  writer = dumping_callback.enable_dump_debug_info(
      self.dump_root, tensor_debug_mode=tensor_debug_mode)
  dumping_callback.disable_dump_debug_info()

  # Execute a couple of eager ops; none of them should be traced.
  input_tensor = constant_op.constant([10.0, 12.0, 10.0])
  for _ in range(2):
    array_ops.unique(input_tensor)
  writer.FlushNonExecutionFiles()
  writer.FlushExecutionFiles()

  with debug_events_reader.DebugDataReader(self.dump_root) as reader:
    reader.update()
    # No executions, no graph traces, and no graphs at all should have
    # been recorded after dumping was disabled.
    self.assertEqual(reader.num_executions(), 0)
    self.assertEqual(reader.num_graph_execution_traces(), 0)
    self.assertFalse(reader.outermost_graphs())
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("FullHealth", "FULL_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
"""Dumping from multiple threads using the same setting."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = variables.Variable(10.0, dtype=dtypes.float32)
y = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def increase_x():
return x.assign_add(y * 2.0)
# Run once on the main thread first, then concurrently on worker threads.
increase_x()
num_threads = 3
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=increase_x))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# 10 --> 16 --> 22 --> 28 --> 34.
self.assertAllClose(x.read_value(), 34.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
# Wall times must be monotonically non-decreasing across digests.
prev_wall_time = 1
for exec_digest in exec_digests:
self.assertGreaterEqual(exec_digest.wall_time, prev_wall_time)
prev_wall_time = exec_digest.wall_time
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
# One Mul/two ReadVariableOps per invocation: the initial call plus one
# call per worker thread.
self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
self.assertEqual(
executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
for trace in graph_exec_traces:
# These are all single-output tensors.
self.assertEqual(trace.output_slot, 0)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
elif tensor_debug_mode == "CONCISE_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID.
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1, 0, 0, 0])
elif tensor_debug_mode == "FULL_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# Elements: [tensor_id,
# device ID (set to -1 for now),
# dtype, rank, element_count,
# neg_inf_count, pos_inf_count, nan_count,
# neg_finite_count, zero_count, pos_finite_count]
self.assertAllClose(
trace.debug_tensor_value,
[tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
elif tensor_debug_mode == "SHAPE":
for trace in graph_exec_traces:
if trace.op_type == "Mul":
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
mul_value = reader.graph_execution_trace_to_tensor_value(trace)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (1 = float32).
# 3rd element: rank (0 for a scalar).
# 4th element: element count.
# Remaining 6 elements: the shape, zero-padded for a scalar.
self.assertAllClose(mul_value, [tensor_id, 1, 0, 1, 0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
# y * 2.0 == 6.0 for the initial call and each of the 3 threads.
self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
"""Two threads dumping to different roots; disabling on one thread must
not affect tracing already enabled on the other."""
dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
v1 = variables.Variable(10.0, dtype=dtypes.float32)
v2 = variables.Variable(3.0, dtype=dtypes.float32)
def add_negative_v1_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_1, tensor_debug_mode="FULL_TENSOR")
# Run in a loop to facilitate interleaving between threads.
for _ in range(3):
v1.assign_add(-(v1 ** 2.0))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
def add_negative_v2_squared_to_itself():
writer = dumping_callback.enable_dump_debug_info(
dump_root_2, tensor_debug_mode="FULL_TENSOR")
v2_squared = v2 ** 2.0
# Since dumping is disabled before the Neg op is called, no tensor data
# should be dumped from the op, but this shouldn't affect the dumping of
# the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
# Both behaviors are checked below.
dumping_callback.disable_dump_debug_info()
negative_v2_squared = -v2_squared
v2.assign_add(negative_v2_squared)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
# v2 is mutated on a sub-thread.
sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
sub_thread.start()
add_negative_v1_squared_to_itself() # v1 is mutated on the main thread.
sub_thread.join()
# 10 - 10 * 10 = -90.
# -90 - (-90 * -90) = -8190.
# -8190 - (-8190 * -8190) = -67084290.
self.assertAllClose(v1.read_value(), -67084290.0)
self.assertAllClose(v2.read_value(), -6.0)
with debug_events_reader.DebugDataReader(dump_root_1) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
# The main thread's dump root must contain all three Pow and Neg ops.
v1_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Pow"]
negative_v1_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Neg"]
self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
self.assertAllClose(
negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])
with debug_events_reader.DebugDataReader(dump_root_2) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
executed_op_types = [digest.op_type for digest in exec_digests]
# The sub-thread disabled dumping before its Neg op ran.
self.assertNotIn("Neg", executed_op_types)
v2_squared_values = [
reader.execution_to_tensor_values(digest)
for digest in exec_digests if digest.op_type == "Pow"]
self.assertAllClose(v2_squared_values, [[9.0]])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
  """Ops in nested control-flow contexts carry the proper graph IDs."""
  writer = dumping_callback.enable_dump_debug_info(
      self.dump_root, tensor_debug_mode="NO_TENSOR")

  @def_function.function
  def iterative_doubling(x, times):
    i = constant_op.constant(0, dtype=dtypes.int32)
    while i < times:
      x = x * 2.0 - 1.0
      i += 1
    return x

  start_value = constant_op.constant(2.0, dtype=dtypes.float32)
  num_iterations = constant_op.constant(4, dtype=dtypes.int32)
  # 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
  self.assertAllClose(
      self.evaluate(iterative_doubling(start_value, num_iterations)), 17.0)
  writer.FlushNonExecutionFiles()
  writer.FlushExecutionFiles()

  with debug_events_reader.DebugDataReader(self.dump_root) as reader:
    reader.update()
    less_digest = reader.graph_op_digests(op_type="Less")[-1]
    mul_digest = reader.graph_op_digests(op_type="Mul")[-1]
    sub_digest = reader.graph_op_digests(op_type="Sub")[-1]
    # The Less op is from the while-loop cond context and hence should have
    # a different innermost context ID from the mul and sub ops, which are
    # both from the while-loop body context.
    self.assertNotEqual(less_digest.graph_id, mul_digest.graph_id)
    self.assertNotEqual(less_digest.graph_id, sub_digest.graph_id)
    # The Mul and Sub ops are from the same innermost context.
    self.assertEqual(mul_digest.graph_id, sub_digest.graph_id)
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
"""predict() on a small recurrent Keras model is dumped end to end."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
batch_size = 5
xs = np.ones([batch_size, 3, 4])
self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
self.assertTrue(reader.executions(digest=True))
graph_exec_digests = reader.graph_execution_traces(digest=True)
executed_op_types = [digest.op_type for digest in graph_exec_digests]
# These are the ops that we can safely assume to have been executed during
# the model prediction.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
# be an empty float32 tensor.
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
else:
# Refrain from asserting the internal implementation details of the LSTM
# layer.
self.assertTrue(any(
bool(tensor_value.size) for tensor_value in tensor_values))
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
"""fit() on a small recurrent Keras model is dumped end to end."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
model = _create_simple_recurrent_keras_model([3, 4])
xs = np.ones([5, 3, 4])
ys = np.ones([5, 1])
# The exact loss values pin down the deterministic training trajectory.
history = model.fit(xs, ys, epochs=3, verbose=0)
self.assertAllClose(
history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
if context.executing_eagerly():
# Top-level executions are only logged in eager mode (in v1 graph mode
# the function runs under Session.run()).
exec_digests = reader.executions(digest=True)
self.assertTrue(exec_digests)
if tensor_debug_mode == "NO_TENSOR":
for digest in exec_digests:
tensor_values = reader.execution_to_tensor_values(digest)
for tensor_value in tensor_values:
self.assertEqual(tensor_value, [])
graph_exec_digests = reader.graph_execution_traces(digest=True)
executed_op_types = [digest.op_type for digest in graph_exec_digests]
# These are the ops that we can safely assume to have been executed during
# the recurrent model's fit() call.
self.assertIn("MatMul", executed_op_types)
self.assertIn("BiasAdd", executed_op_types)
# On the GPU, CudnnRNN is used in lieu of the default op-by-op
# implementation.
self.assertTrue(
("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
"CudnnRNN" in executed_op_types))
self.assertTrue(
("SigmoidGrad" in executed_op_types and
"TanhGrad" in executed_op_types or
"CudnnRNNBackprop" in executed_op_types))
if tensor_debug_mode == "NO_TENSOR":
for digest in graph_exec_digests:
tensor_values = reader.graph_execution_trace_to_tensor_value(digest)
for tensor_value in tensor_values:
self.assertEqual(tensor_value, [])
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testMobileNetV2Fit(self, tensor_debug_mode):
"""Test training Keras MobileNetV2 works with dumping."""
# Use a large circular-buffer to make sure we capture all the executed ops.
writer = dumping_callback.enable_dump_debug_info(
self.dump_root,
tensor_debug_mode=tensor_debug_mode,
circular_buffer_size=100000)
model = mobilenet_v2.MobileNetV2(
input_shape=(32, 32, 3), alpha=0.1, weights=None)
# Truncate the model at layer 22 and add a small head, to keep the test
# fast while still exercising Conv2D/Relu6 forward and backward ops.
y = model.layers[22].output
y = core.Flatten()(y)
y = core.Dense(1)(y)
model = models.Model(inputs=model.inputs, outputs=y)
batch_size = 2
xs = np.zeros([batch_size] + list(model.input_shape[1:]))
ys = np.zeros([batch_size] + list(model.output_shape[1:]))
model.compile(optimizer="sgd", loss="mse")
epochs = 1
history = model.fit(xs, ys, epochs=epochs, verbose=0)
self.assertLen(history.history["loss"], epochs)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
if context.executing_eagerly():
# NOTE(b/142486213): Execution of the TF function happens with
# Session.run() in v1 graph mode, hence it doesn't get logged to the
# .execution file.
exec_digests = reader.executions(digest=True)
self.assertTrue(exec_digests)
graph_exec_digests = reader.graph_execution_traces()
executed_op_types = [digest.op_type for digest in graph_exec_digests]
# These are the ops that we can safely assume to have been executed during
# the model's fit() call.
self.assertIn("Conv2D", executed_op_types)
self.assertIn("Relu6", executed_op_types)
self.assertIn("Conv2DBackpropFilter", executed_op_types)
self.assertIn("Relu6Grad", executed_op_types)
if tensor_debug_mode == "NO_TENSOR":
# Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
# to be an empty float32 tensor.
tensor_values = [
reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "FULL_TENSOR":
# Under FULL_TENSOR mode, the dumped values retain the batch dimension.
conv2d_values = [
reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests if digest.op_type == "Conv2D"]
self.assertTrue(conv2d_values)
for conv2d_value in conv2d_values:
self.assertGreater(len(conv2d_value.shape), 1)
self.assertEqual(conv2d_value.shape[0], batch_size)
relu6_values = [
reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests if digest.op_type == "Relu6"]
self.assertTrue(relu6_values)
for relu6_value in relu6_values:
self.assertGreater(len(relu6_value.shape), 1)
self.assertEqual(relu6_value.shape[0], batch_size)
conv2d_bp_filter_values = [
reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests
if digest.op_type == "Conv2DBackpropFilter"]
self.assertTrue(conv2d_bp_filter_values)
for conv2d_bp_filter_value in conv2d_bp_filter_values:
self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
relu6_grad_values = [
reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests if digest.op_type == "Relu6Grad"]
self.assertTrue(relu6_grad_values)
for relu6_grad_value in relu6_grad_values:
self.assertGreater(len(relu6_grad_value.shape), 1)
# Script entry point: run the test suite with eager execution enabled.
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| {
"content_hash": "47624b882c2da253f654440e92368d48",
"timestamp": "",
"source": "github",
"line_count": 1534,
"max_line_length": 80,
"avg_line_length": 43.59452411994785,
"alnum_prop": 0.6327421718455603,
"repo_name": "xzturn/tensorflow",
"id": "abf919190848411b20a7b0204aebda6bc7090137",
"size": "67563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/lib/dumping_callback_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "905803"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82703929"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871773"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1048075"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2139315"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319649"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38461380"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696334"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3678649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Reinforcement Learning (Chapter 21)
"""
from utils import *
import agents
class PassiveADPAgent(agents.Agent):
"""Passive (non-learning) agent that uses adaptive dynamic programming
on a given MDP and policy. [Fig. 21.2]"""
# NOTE(review): this bare `NotImplementedError` is a no-op expression
# evaluated once at class-creation time — it does NOT raise anything.
# It appears to be a placeholder for the unwritten implementation.
NotImplementedError
class PassiveTDAgent(agents.Agent):
"""Passive (non-learning) agent that uses temporal differences to learn
utility estimates. [Fig. 21.4]"""
# NOTE(review): bare `NotImplementedError` is a no-op placeholder, not a
# raise; the class is intentionally unimplemented.
NotImplementedError
| {
"content_hash": "5bb76dd764f5ae8ea6cad0bf78d64dac",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 29.066666666666666,
"alnum_prop": 0.7293577981651376,
"repo_name": "ken0nek/Software2",
"id": "51e3a5a9c63d039483db37720a08cfb3906183ad",
"size": "436",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "140714/rl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28252"
},
{
"name": "C++",
"bytes": "14949"
},
{
"name": "CoffeeScript",
"bytes": "147"
},
{
"name": "Emacs Lisp",
"bytes": "2850"
},
{
"name": "Java",
"bytes": "278"
},
{
"name": "JavaScript",
"bytes": "285"
},
{
"name": "PHP",
"bytes": "196"
},
{
"name": "Perl",
"bytes": "184"
},
{
"name": "Python",
"bytes": "400038"
},
{
"name": "Ruby",
"bytes": "776"
},
{
"name": "Shell",
"bytes": "103"
},
{
"name": "TeX",
"bytes": "16617"
}
],
"symlink_target": ""
} |
import ldpy
if __name__ == "__main__":
ldpy = ldpy.Client("http://localhost:8080/ldp")
blog = ldpy.create("http://localhost:8080/ldp", open("data/blog.ttl"), "text/turtle", "blog")
print "LDP Blog created at: ", blog
print ldpy.read(blog)
print
post = ldpy.create(blog, open("data/post.ttl"), "text/turtle", "post")
print "LDP Post created at: ", post
print ldpy.read(post)
print
#print ldpy.read("http://localhost:8080/ldp/foo")
| {
"content_hash": "6f651436466ef4dcc179c1fd08c45f96",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 97,
"avg_line_length": 27.941176470588236,
"alnum_prop": 0.6189473684210526,
"repo_name": "wikier/ldpy",
"id": "dddfa215468084d6b4f0f52c4e12933dd8d7f2cb",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6608"
}
],
"symlink_target": ""
} |
import json
import sys
def main(keys):
# Load a JSON document from stdin, descend through the given sequence of
# keys, then print every key of the resulting dict (Python 2 `print`).
# NOTE(review): `with sys.stdin` closes stdin when the block exits —
# presumably fine for this one-shot CLI; confirm if reused as a library.
with sys.stdin as json_string:
json_dict = json.load(json_string)
for key in keys:
json_dict = json_dict[key]
for key in json_dict:
print key
# All command-line arguments are treated as a path of keys into the JSON.
if __name__ == '__main__':
keys = sys.argv[1:]
main(keys)
| {
"content_hash": "0cf64025febc36e86cf6fb4e011f0267",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 42,
"avg_line_length": 19.066666666666666,
"alnum_prop": 0.5769230769230769,
"repo_name": "scorphus/dotfilesetal",
"id": "38f830e9c1f0e6ec0e361b9a0cfac30bc6e6da7c",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/get_dict_keys.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "1932"
},
{
"name": "CSS",
"bytes": "1203"
},
{
"name": "JavaScript",
"bytes": "1845"
},
{
"name": "PHP",
"bytes": "156"
},
{
"name": "Perl",
"bytes": "225491"
},
{
"name": "Python",
"bytes": "34519"
},
{
"name": "Ruby",
"bytes": "24726"
},
{
"name": "Shell",
"bytes": "140892"
},
{
"name": "Vim script",
"bytes": "616"
}
],
"symlink_target": ""
} |
"""Tests for mlp."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from language.mentionmemory.modules import mlp
class MLPBlockTest(absltest.TestCase):
  """Tests for the MLPBlock module."""

  # Model/test hyperparameters shared by all test methods.
  input_dim = 16
  hidden_dim = 64
  dtype = jnp.float32
  dropout_rate = 0.1
  layer_norm_epsilon = 1e-12
  bsz = 4
  seq_len = 20

  def test_mlp_block(self):
    """The block's output shape must equal its input shape."""
    inputs = jnp.ones(
        shape=(self.bsz, self.seq_len, self.input_dim), dtype=self.dtype)
    block = mlp.MLPBlock(
        input_dim=self.input_dim,
        hidden_dim=self.hidden_dim,
        dropout_rate=self.dropout_rate,
        dtype=self.dtype,
        layer_norm_epsilon=self.layer_norm_epsilon,
    )
    init_rng = jax.random.PRNGKey(0)
    output, _ = block.init_with_output(
        init_rng,
        x=inputs,
        deterministic=True,
    )
    self.assertSequenceEqual(output.shape, inputs.shape)
# Script entry point.
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "0b70d990f227cc773eec0ab3aa3bc345",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.6228813559322034,
"repo_name": "google-research/language",
"id": "810b5a77d9bd6c6b3e9e21d1cd925c449b6c2854",
"size": "1559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/mentionmemory/modules/mlp_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
import argparse
import os
import re
import sys
import traceback
import urlparse
from metatools.imports import load_entrypoint
from . import utils
from . import tickets
def parse_url(url):
    """Parse an sgactions URL into ``(entrypoint, kwargs)``.

    The URL looks like ``scheme:package.module:function?key=value&...``.
    Integer coercion is applied to ``*_id`` values and comma-separated
    ``ids``/``*_ids`` lists; single-valued query params are unwrapped
    (except ``cols`` and ``column_display_names``, which stay lists).

    Raises ValueError if the path is not a valid entrypoint.
    """
    # Parse the URL into scheme, path, and query.
    m = re.match(r'^(?:(\w+):)?(.*?)(?:/(.*?))?(?:\?(.*))?$', url)
    scheme, netloc, path, query = m.groups()
    query = urlparse.parse_qs(query, keep_blank_values=True) if query else {}

    # Parse the values.
    for k, v in query.items():
        if k == 'ids' or k.endswith('_ids'):
            # Comma-separated list of ints; empty string means empty list.
            v[:] = [int(x) for x in v[0].split(',')] if v[0] else []
            continue
        if k.endswith('_id'):
            v[:] = [int(x) for x in v]
        if len(v) == 1 and k not in ('cols', 'column_display_names'):
            query[k] = v[0]

    # Parse the path into an entrypoint.
    # BUG FIX: an unreachable `return 1` used to follow this raise; removed.
    m = re.match(r'^([\w.]+:\w+)$', netloc)
    if not m:
        raise ValueError('entrypoint must be like "package.module:function"; got "%s"' % netloc)
    return m.group(1), query
def dispatch(url, reload=False):
# Resolve the URL's entrypoint and call it with the parsed kwargs.
# On failure: file/annotate a Shotgun ticket and pop an alert; on a
# failure *within* the failure handler, pop a "fatal" alert with both
# tracebacks. Returns the called function's result, or the exception.
# NOTE: Python 2 syntax (`except Exception, e`).
try:
kwargs = {}
entrypoint, kwargs = parse_url(url)
func = load_entrypoint(entrypoint, reload=reload)
return func(**kwargs)
except Exception, e:
# Default value in case there is an error in traceback.format_exc()...
tb = 'ERROR DURING TRACEBACK'
try:
tb = traceback.format_exc()
ticket_id = tickets.get_ticket_for_exception(*sys.exc_info())
tickets.reply_to_ticket(ticket_id, [
('Exception', sys.exc_info()),
('SGAction Kwargs', kwargs or url),
('OS Environment', dict(os.environ)),
], user_id=kwargs.get('user_id'))
utils.alert(
title='Unhandled %s' % type(e).__name__,
message='<pre>%s</pre>\n\nReplied to Ticket %d.' % (tb, ticket_id),
)
return e
except Exception, e2:
utils.alert(
title='Fatal Unhandled %s' % type(e2).__name__,
message='<pre>%s</pre>\n\nDuring handling of the above exception, another exception occurred:\n\n<pre>%s</pre>' % (tb, traceback.format_exc()),
)
return e2
def main():
    """CLI entry point: parse arguments and dispatch the sgactions URL."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', choices=['sgactions.dispatch'])  # Just to ignore this parameter.
    parser.add_argument('--chrome-native', action='store_true')  # Fall into the native dispatcher.
    # BUG FIX: this option was declared with action='store_true', but the
    # code below uses args.file as a *path* (open/unlink), so it must take
    # a value: the file containing the URL to dispatch.
    parser.add_argument('-f', '--file')  # Load the URL from a file.
    parser.add_argument('url', nargs='?', default='')
    args = parser.parse_args()

    if args.chrome_native:
        from sgactions.browsers.chrome_native import main as native_main
        native_main()
        exit()

    url = args.url
    if args.file:
        # One-shot handoff file: consume and delete it.
        url = open(args.file).read()
        os.unlink(args.file)

    # Redirect all output to a log file before dispatching.
    sys.stdout = sys.stderr = open('/tmp/sgactions.native.log', 'a')
    dispatch(url)
# Script entry point; propagate main()'s return code to the shell.
if __name__ == '__main__':
exit(main() or 0)
| {
"content_hash": "089e1de4a349e6dfd76b997618d36dd2",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 159,
"avg_line_length": 28.89814814814815,
"alnum_prop": 0.5523870554309516,
"repo_name": "westernx/sgactions",
"id": "970334b473f6a58e58d7c1a7a60fdc1840962fc4",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgactions/dispatch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57957"
},
{
"name": "JavaScript",
"bytes": "29352"
},
{
"name": "Makefile",
"bytes": "389"
},
{
"name": "Python",
"bytes": "42287"
},
{
"name": "Shell",
"bytes": "3211"
}
],
"symlink_target": ""
} |
class B(Exception):
    """Root of the demo exception hierarchy."""
class C(B):
    """First-level subclass of B."""
class D(C):
    """Second-level subclass (D -> C -> B)."""
# Demonstrate that `except` clauses are tried in order: list derived
# classes before their bases, or the base clause will match everything.
for exc_class in (B, C, D):
    try:
        raise exc_class()
    except D:  # derived classes listed first, so each match is exact
        print("D")
    except C:
        print("C")
    except B:  # base class last — only reached for actual B instances
        print("B")

for exc_class in (B, C, D):
    try:
        raise exc_class()
    except B:  # base class first, so it swallows all three cases!
        print("B")
    except D:  # never gets here
        print("D")
    except C:  # never gets here
        print("C")

exit()
| {
"content_hash": "13cbc6e15e0d9aa24bed7bfa7b431458",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 20.633333333333333,
"alnum_prop": 0.5088852988691438,
"repo_name": "ombt/analytics",
"id": "2b50f4d7f7a6a6a78b1195cea84e559d727e5276",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apex/python/ofc_online_python_tutorial/ch8_error_exceptions/io_exception2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "C#",
"bytes": "167432"
},
{
"name": "CSS",
"bytes": "27541"
},
{
"name": "Dockerfile",
"bytes": "3588"
},
{
"name": "HTML",
"bytes": "1166436"
},
{
"name": "JavaScript",
"bytes": "1217784"
},
{
"name": "Makefile",
"bytes": "2310"
},
{
"name": "PLSQL",
"bytes": "10668"
},
{
"name": "PLpgSQL",
"bytes": "55402"
},
{
"name": "Perl",
"bytes": "33103233"
},
{
"name": "Perl 6",
"bytes": "3349"
},
{
"name": "Python",
"bytes": "748953"
},
{
"name": "R",
"bytes": "1326030"
},
{
"name": "Rebol",
"bytes": "1366"
},
{
"name": "SQLPL",
"bytes": "424169"
},
{
"name": "Shell",
"bytes": "340414"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow ``user.email_address`` to be blank/null."""

    dependencies = [('main', '0001_initial')]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='email_address',
            field=models.EmailField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name='\u90ae\u7bb1',
            ),
        ),
    ]
| {
"content_hash": "fb1cdba0918512e09964a5a0b233f229",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 104,
"avg_line_length": 23.5,
"alnum_prop": 0.6028368794326241,
"repo_name": "xn1990/B10",
"id": "96a12e776b5073be278397722a54f66c9c434f1d",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/migrations/0002_auto_20170429_2015.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92916"
},
{
"name": "HTML",
"bytes": "1453834"
},
{
"name": "JavaScript",
"bytes": "7858"
},
{
"name": "Python",
"bytes": "15679"
}
],
"symlink_target": ""
} |
"""Module used to create a shared/static library from pyJac files.
"""
from __future__ import print_function
import shutil
import re
import os
import subprocess
import sys
import multiprocessing
import platform
from .. import utils
def lib_ext(shared):
    """Return the library file extension: '.so' if shared, else '.a'."""
    return '.so' if shared else '.a'
# Compiler executable used for each supported language.
cmd_compile = {
    'c': 'gcc',
    'icc': 'icc',
    'cuda': 'nvcc',
}
def cmd_lib(lang, shared):
    """Return the command (as an argument list) that creates a library.

    Parameters
    ----------
    lang : {'c', 'cuda', 'icc'}
        Language / compiler family to build with.
    shared : bool
        If True, build a shared library; otherwise a static one.

    Returns
    -------
    list of str

    Raises
    ------
    ValueError
        If `lang` is unsupported. (The previous implementation silently
        fell off the end and returned None in that case.)
    """
    if lang == 'c':
        return ['ar', 'rcs'] if not shared else ['gcc', '-shared']
    elif lang == 'cuda':
        return ['nvcc', '-lib'] if not shared else ['nvcc', '-shared']
    elif lang == 'icc':
        return ['ar', 'rcs'] if not shared else ['icc', '-shared']
    raise ValueError('Unsupported language: {}'.format(lang))
# Default include search paths per language.
includes = {
    'c': ['/usr/local/include/'],
    'icc': ['/usr/local/include/'],
    'cuda': ['/usr/local/cuda/include/',
             '/usr/local/cuda/samples/common/inc/'],
}

# Base compilation flags per language.
flags = {
    'c': ['-std=c99', '-O3', '-mtune=native'],
    'icc': ['-std=c99', '-O3', '-xhost', '-fp-model', 'precise', '-ipo'],
    'cuda': ['-O3', '-arch=sm_20'],
}

# Extra flags required for position-independent (shared-library) builds.
shared_flags = {
    'c': ['-fPIC'],
    'icc': ['-fPIC'],
    'cuda': ['-Xcompiler', '"-fPIC"'],
}

# Link-time libraries/flags per language.
libs = {
    'c': ['-lm', '-std=c99'],
    'cuda': ['-lcudart'],
    'icc': ['-m64', '-ipo', '-lm', '-std=c99'],
}
def which(file):
    """A substitute for the `which` command: search each directory on the
    PATH for `file` and return the full path of the first match, or None.
    """
    # NOTE: the parameter name shadows the Python 2 builtin `file`; it is
    # kept for backward compatibility with existing callers.
    for path in os.environ["PATH"].split(os.pathsep):
        # Build the candidate path once instead of joining twice.
        candidate = os.path.join(path, file)
        if os.path.exists(candidate):
            return candidate
    return None
def compiler(fstruct):
"""Given a file structure, this method will compile the source file for the
language and options specified
Parameters
----------
fstruct : `file_struct`
An information struct that holds the various compilation options
Returns
-------
success : int
0 if the compilation process was successful, -1 otherwise
Notes
-----
Designed to work with a multiprocess compilation workflow
"""
args = [cmd_compile[fstruct.build_lang]]
if fstruct.auto_diff:
# Autodifferentiation builds always use g++ regardless of build_lang.
args = ['g++']
args.extend(flags[fstruct.build_lang])
if fstruct.auto_diff:
# g++ rejects -std=c99; strip any such flag for autodiff builds.
args = [x for x in args if 'std=c99' not in x]
#always use fPIC in case we're building wrapper
args.extend(shared_flags[fstruct.build_lang])
args.extend(fstruct.args)
include = ['-I{}'.format(d) for d in fstruct.i_dirs +
includes[fstruct.build_lang]
]
args.extend(include)
# '-dc' (relocatable device code) for CUDA, plain '-c' otherwise.
args.extend([
'-{}c'.format('d' if fstruct.lang == 'cuda' else ''),
os.path.join(fstruct.source_dir, fstruct.filename +
utils.file_ext[fstruct.build_lang]
),
'-o', os.path.join(fstruct.obj_dir, os.path.basename(fstruct.filename) + '.o')
])
# Drop any empty/whitespace-only arguments before invoking the compiler.
args = [val for val in args if val.strip()]
try:
print(' '.join(args))
subprocess.check_call(args)
except OSError:
# Compiler executable itself was not found on the system.
print('Error: Compiler {} not found, generation of pyjac library failed.'.format(args[0]))
sys.exit(-1)
except subprocess.CalledProcessError:
# Compiler ran but reported an error; signal failure to the caller.
print('Error: compilation failed for ' + fstruct.filename +
utils.file_ext[fstruct.build_lang]
)
return -1
return 0
def get_cuda_path():
    """Return the CUDA library directory derived from the `nvcc` location.

    Exits the process with status -1 if `nvcc` cannot be found on the PATH.

    Returns
    -------
    cuda_path : str
        Path of the CUDA `lib`/`lib64` directory on this system.
    """
    nvcc_path = which('nvcc')
    if nvcc_path is None:
        print('nvcc not found!')
        sys.exit(-1)

    # nvcc lives in <root>/bin; the libraries live in <root>/lib[64].
    is_64bit = platform.architecture()[0] == '64bit'
    cuda_root = os.path.dirname(os.path.dirname(nvcc_path))
    return os.path.join(cuda_root,
                        'lib{}'.format('64' if is_64bit else ''))
def libgen(lang, obj_dir, out_dir, filelist, shared, auto_diff):
    """Create a library from a list of compiled files

    Parameters
    ----------
    lang : {'c', 'cuda'}
        Programming language
    obj_dir : str
        Path with object files
    out_dir : str
        Path to place the library in
    filelist : List of `str`
        The list of object files to include in the library
    shared : bool
        If ``True``, create a shared library
    auto_diff : bool
        If ``True``, this is an autodifferentiation library

    Returns
    -------
    libname : str
        Filename of the generated library
    """
    command = cmd_lib(lang, shared)

    # Library name tag: 'cu' for CUDA, 'ad' for autodiff, 'c' otherwise.
    if lang == 'cuda':
        desc = 'cu'
    elif lang == 'c':
        if auto_diff:
            desc = 'ad'
        else:
            desc = 'c'

    libname = 'lib{}_pyjac'.format(desc)

    # Remove any pre-existing library of either flavor (shared/static).
    if os.path.exists(os.path.join(out_dir, libname + lib_ext(shared))):
        os.remove(os.path.join(out_dir, libname + lib_ext(shared)))
    if os.path.exists(os.path.join(out_dir, libname + lib_ext(not shared))):
        os.remove(os.path.join(out_dir, libname + lib_ext(not shared)))

    libname += lib_ext(shared)

    # `ar` takes the output archive name directly after its flags.
    if not shared and lang == 'c':
        command += [os.path.join(out_dir, libname)]

    # Add the object files.
    command.extend([os.path.join(obj_dir, os.path.basename(f) + '.o')
                    for f in filelist])

    if shared:
        command.extend(shared_flags[lang])

    if shared or lang == 'cuda':
        command += ['-o']
        command += [os.path.join(out_dir, libname)]

    if lang == 'cuda':
        command += ['-L{}'.format(get_cuda_path())]

    command.extend(libs[lang])

    try:
        print(' '.join(command))
        subprocess.check_call(command)
    except OSError:
        # BUG FIX: this handler previously referenced the undefined name
        # `args` (copy-paste from compiler()), which raised a NameError
        # instead of reporting the missing tool. Use `command` here.
        print('Error: Compiler {} not found, generation of pyjac library failed.'.format(command[0]))
        sys.exit(-1)
    except subprocess.CalledProcessError:
        print('Error: Generation of pyjac library failed.')
        sys.exit(-1)

    return libname
class file_struct(object):
    """Simple record holding everything needed to compile one source file.

    Instances are picklable so they can be handed to a multiprocessing
    pool for parallel compilation.
    """

    def __init__(self, lang, build_lang, filename, i_dirs, args,
                 source_dir, obj_dir, shared
                 ):
        """
        Parameters
        ----------
        lang : str
            Compiler to use
        build_lang : {'c', 'cuda'}
            Programming language
        filename : str
            The file to compile
        i_dirs : list of str
            List of include directories for compilation
        args : list of str
            List of additional arguments
        source_dir : str
            The directory the file is located in
        obj_dir : str
            The directory to place the compiled object file in
        shared : bool
            If ``True``, this is creating a shared library
        """
        self.lang = lang
        self.build_lang = build_lang
        self.filename = filename
        self.i_dirs = i_dirs
        self.args = args
        self.source_dir = source_dir
        self.obj_dir = obj_dir
        self.shared = shared
        # toggled on externally when autodifferentiation is requested
        self.auto_diff = False
def get_file_list(source_dir, pmod, lang, FD=False, AD=False):
    """Build the list of source files (and include dirs) to compile.

    Parameters
    ----------
    source_dir : str
        Path with source files
    pmod : bool
        ``True`` if pressure dependent reactions present in mechanism
    lang : {'c', 'cuda'}
        Programming language
    FD : Optional[bool]
        Optional; if ``True``, include finite difference
    AD : Optional[bool]
        Optional; if ``True``, include autodifferentiation

    Returns
    -------
    i_dirs : list of `str`
        List of include directories
    files : list of `str`
        List of files (base names, without extensions)
    """
    i_dirs = [source_dir]

    # autodifferentiation uses its own self-contained set of sources
    if AD:
        files = ['ad_dydt', 'ad_rxn_rates', 'ad_spec_rates',
                 'ad_chem_utils', 'ad_jac'
                 ]
        if pmod:
            files += ['ad_rxn_rates_pres_mod']
        return i_dirs, files

    files = ['chem_utils', 'dydt', 'spec_rates',
             'rxn_rates', 'mechanism', 'mass_mole'
             ]
    if pmod:
        files += ['rxn_rates_pres_mod']

    if FD:
        files += ['fd_jacob']
        flists = []
    else:
        files += ['jacob']
        flists = [('jacobs', 'jac_list_{}')]

    # rate/jacobian subroutines may be split over many generated files whose
    # names are recorded in a "list" file inside each subdirectory
    flists += [('rates', 'rate_list_{}')]
    for flist in flists:
        try:
            with open(os.path.join(source_dir,
                      flist[0], flist[1].format(lang))
                      ) as file:
                vals = file.readline().strip().split(' ')
                vals = [os.path.join(flist[0],
                        f[:f.index(utils.file_ext[lang])]) for f in vals
                        ]
                files += vals
                i_dirs.append(os.path.join(source_dir, flist[0]))
        except (IOError, OSError, ValueError, KeyError):
            # list file missing or malformed -> no split files to include.
            # BUG FIX: previously a bare ``except`` which also swallowed
            # e.g. KeyboardInterrupt/SystemExit.
            pass

    if lang == 'cuda':
        files += ['gpu_memory']

    return i_dirs, files
def generate_library(lang, source_dir, obj_dir=None,
                     out_dir=None, shared=None,
                     finite_difference=False, auto_diff=False
                     ):
    """Generate shared/static library for pyJac files.

    Parameters
    ----------
    lang : {'c', 'cuda'}
        Programming language
    source_dir : str
        Path of folder with pyJac files
    obj_dir : Optional[str]
        Optional; path of folder to store generated object files
    out_dir : Optional[str]
        Optional; path of folder to place the generated library in
    shared : bool
        If ``True``, generate shared library (vs. static)
    finite_difference : Optional[bool]
        If ``True``, include finite differences
    auto_diff : bool
        If ``True``, include autodifferentiation

    Returns
    -------
    str
        Location of generated library
    """
    # check lang
    if lang not in flags:
        print('Cannot generate library for unknown language {}'.format(lang))
        sys.exit(-1)

    # BUG FIX: this check previously ran *after* ``shared`` had been forced
    # to False for CUDA, so the error below was unreachable dead code.
    if lang == 'cuda' and shared:
        print('CUDA does not support linking of shared device libraries.')
        sys.exit(-1)
    shared = shared and lang != 'cuda'

    # icc compiles plain C sources
    build_lang = lang if lang != 'icc' else 'c'

    source_dir = os.path.abspath(os.path.normpath(source_dir))

    # create and normalize the object / output directories
    if obj_dir is None:
        obj_dir = os.path.join(os.getcwd(), 'obj')
    else:
        obj_dir = os.path.abspath(os.path.normpath(obj_dir))
    if not os.path.exists(obj_dir):
        os.makedirs(obj_dir)
    if out_dir is None:
        out_dir = os.getcwd()
    else:
        out_dir = os.path.abspath(os.path.normpath(out_dir))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    obj_dir = os.path.abspath(obj_dir)
    out_dir = os.path.abspath(out_dir)

    pmod = False
    # figure out whether there's pressure mod reactions or not by scanning
    # the generated mechanism header for the PRES_MOD_RATES define
    with open(os.path.join(source_dir,
              'mechanism{}'.format(utils.header_ext[build_lang])), 'r'
              ) as file:
        for line in file.readlines():
            line = line.strip()
            match = re.search(r'\s*#define PRES_MOD_RATES (\d+)', line)
            if match is not None:
                pmod = int(match.group(1)) > 0
                break

    # get file lists
    i_dirs, files = get_file_list(source_dir, pmod, build_lang,
                                  FD=finite_difference, AD=auto_diff
                                  )

    # Compile generated source code
    structs = [file_struct(lang, build_lang, f, i_dirs,
                           (['-DFINITE_DIFF'] if finite_difference else []),
                           source_dir, obj_dir, shared) for f in files
               ]
    for x in structs:
        x.auto_diff = auto_diff

    # compile in parallel; any failed compilation aborts the build
    pool = multiprocessing.Pool()
    results = pool.map(compiler, structs)
    pool.close()
    pool.join()
    if any(r == -1 for r in results):
        sys.exit(-1)

    libname = libgen(lang, obj_dir, out_dir, files, shared, auto_diff)
    return os.path.join(out_dir, libname)
| {
"content_hash": "97ecf608c304947be908b735f9cfe02e",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 98,
"avg_line_length": 29.34549878345499,
"alnum_prop": 0.5485448967747285,
"repo_name": "kyleniemeyer/pyJac",
"id": "93402ec4a3d9b85f5e1fc76ee0d8433593445d2f",
"size": "12061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyjac/libgen/libgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6869"
},
{
"name": "Cuda",
"bytes": "10026"
},
{
"name": "Python",
"bytes": "509231"
}
],
"symlink_target": ""
} |
import socket
import re
import simplejson
import urllib
import urllib2
import urlclean
# VirusTotal URL-report endpoint used for link scanning below.
url = "https://www.virustotal.com/vtapi/v2/url/report"
# NOTE(review): placeholder connection settings -- fill in before running.
server = "server"
channel = "#channel"
botnick = "botnick"
# open a plain TCP connection to the IRC server and register the nick
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667)) #connect to server using 6667
ircsock.send(u'NICK %s\n' % (botnick))#user auth
ircsock.send(u'USER %s bla %s :%s\n' % (botnick, server, botnick))
def ping():
    """Answer the server's PING with a PONG to keep the connection alive."""
    ircsock.send("PONG :pong\n")
def sendmsg(chan, msg):
    """Send ``msg`` to channel ``chan`` as a PRIVMSG."""
    ircsock.send("PRIVMSG {} :{}\n".format(chan, msg))
def joinchan(chan):
    """Join the IRC channel ``chan``."""
    ircsock.send("JOIN {}\n".format(chan))
def extracturl(msg):
    """Return the unshortened URL found in a PRIVMSG line, or False.

    Non-PRIVMSG lines and messages without an http(s) link yield False.
    """
    if "PRIVMSG" not in msg:
        return False
    match = re.search("(?P<url>https?://[^\s]+)", msg)
    if match is None:
        return False
    return urlclean.unshorten(match.group("url"))
joinchan(channel)
# Main receive loop: scan each incoming line for a URL, submit it to
# VirusTotal, and answer server pings.
while True:
    ircmsg = ircsock.recv(2048)
    ircmsg = ircmsg.strip('\n\r')
    # NOTE(review): extracturl() is called twice per message; the second
    # call repeats the (possibly slow) unshorten step.
    if extracturl(ircmsg) != False:
        link = extracturl(ircmsg)
        # NOTE(review): the apikey is a placeholder -- set a real key.
        parameters = {"resource": link,
                      "apikey": "virus total api key"}
        data = urllib.urlencode(parameters)
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        json = response.read()
        response_dict = simplejson.loads(json)
        # NOTE(review): ``positives`` is fetched but never reported anywhere.
        positives = response_dict.get('positives')
    print(ircmsg)
    if ircmsg.find("PING :") != -1: #respond to server pings
        ping()
| {
"content_hash": "75338d4f81e392aebeb5450713788b8e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 67,
"avg_line_length": 23.389830508474578,
"alnum_prop": 0.6746376811594202,
"repo_name": "Z-KO/url-expanderscanner-bot",
"id": "9f5c6e8f0123a1a4708c82019be2cb4a331c0ed3",
"size": "1380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "botmain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1380"
}
],
"symlink_target": ""
} |
import unittest
from MapperSwitch import MapperSwitch
from Wiring import Wiring
class TestMapperSwitch(unittest.TestCase):
    """Unit tests for MapperSwitch signal routing."""

    def setUp(self):
        # wiring config: input pin -> list of output pins it connects to
        self.wiringCfg = {
            "0": [0],
            "1": [2, 0],
            "2": [1, 0],
        }
        self.mapper = MapperSwitch(Wiring(self.wiringCfg))

    def testSignal(self):
        # the fan-out size should follow the wiring entry for each pin
        self.assertEqual(1, len(self.mapper.signalIn(0)))
        self.assertEqual(2, len(self.mapper.signalIn(1)))

    def testinvalidOperation(self):
        # reverse signalling must raise on a switch mapper
        with self.assertRaises(Exception) as ctx:
            self.mapper.reverseSignal(0)
| {
"content_hash": "91086feae13d0cb5475dc9bb62107eab",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 56,
"avg_line_length": 25.5,
"alnum_prop": 0.5384615384615384,
"repo_name": "mrfawy/ModernEnigma",
"id": "d6fa7972594aadfd0388b9a0b9bca4f77bf992ce",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/testMapperSwitch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82649"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
} |
__author__ = 'Amy'
import os,zipfile
from .utils import makeDir
from library.core.utils import compress_dir,unzip
# Per-plugin resource paths to exclude when building the plugin's zip.
no_zip_res = {
    'mongodb-2.4.5': [
        "data/",
        "logs/",
    ],
    "mysql-5.1": [
        'data/',
    ],
    "openssl-1.9.8": [
        'certs/',
    ],
    'nginx-1.5.12': [
        # BUG FIX: a missing comma here used to merge these two paths into a
        # single string via implicit concatenation, so neither was excluded.
        'conf/certs/ptphp.com.key',
        'conf/certs/ptphp.net.key',
    ],
}
def get_zip_version(name, dir):
    """Return the next zip version number for plugin ``name`` in ``dir``.

    Scans ``dir`` for files named ``<name>-<N>.zip`` and returns the highest
    ``N`` found plus one (1 when no archive exists yet).
    """
    path = os.path.join(dir, name)
    # single-argument print() is valid in both Python 2 and 3
    # (was a Python-2-only print statement)
    print("get version :" + path)
    versions = []
    for file in os.listdir(dir):
        if name in file and ".zip" in file:
            versions.append(int(file.replace(name + "-", "").replace(".zip", "")))
    if versions:
        v = sorted(versions, reverse=True)[0]
    else:
        v = 0
    return v + 1
def zip_local_all(dir):
    """Zip every plugin directory found under ``<dir>/local``."""
    local_root = os.path.join(dir, "local")
    for plugin_name in os.listdir(local_root):
        zip_local(dir, plugin_name)
def zip_local(dir, name):
    """Create a versioned zip of plugin ``name`` from ``<dir>/local``.

    The archive is written to ``<dir>/local_zip/<name>-<version>.zip``,
    excluding any resource paths listed for the plugin in ``no_zip_res``.
    Does nothing (after printing a notice) if the plugin dir is missing.
    """
    no_zip_res_plugin = []
    if name in no_zip_res.keys():
        # parenthesized print() works in both Python 2 and 3
        print(name)
        no_zip_res_plugin = no_zip_res[name]
    dir_local = os.path.join(dir, "local")
    dir_local_zip = os.path.join(dir, "local_zip")
    version = get_zip_version(name, dir_local_zip)
    plugin_name_zip_file_name = name + "-" + str(version) + ".zip"
    print(plugin_name_zip_file_name)
    plugin_name_zip_file_name_path = os.path.join(dir_local_zip, plugin_name_zip_file_name)
    path_root = os.path.join(dir_local, name)
    # idiomatic truth test (was: ``if False == os.path.isdir(...)``)
    if not os.path.isdir(path_root):
        print("no exists : " + path_root)
        return
    # compress_dir archives relative to the current working directory
    os.chdir(path_root)
    compress_dir("./", plugin_name_zip_file_name_path, no_zip_res_plugin)
def unzip_local(dir, zip_name):
    """Extract ``zip_name`` into ``<dir>/<archive-basename>``.

    Skips extraction when the target directory already exists or the
    archive itself is missing.
    """
    (dirname, filename) = os.path.split(zip_name)
    # print the tuple itself, matching the original (Python 2) output
    print((dirname, filename))
    dir = os.path.join(dir, filename.replace(".zip", ""))
    print(dir)
    if os.path.exists(dir):
        print("exists : " + dir)
        return
    if os.path.exists(zip_name):
        unzip(zip_name, dir)
    else:
        # message typo fixed (was "no exsits")
        print("no exists : " + zip_name)
| {
"content_hash": "bb97a15f79d764fe7a3e5d4afa697725",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 90,
"avg_line_length": 25.379746835443036,
"alnum_prop": 0.5845386533665835,
"repo_name": "ptphp/PtServer",
"id": "7923f7d120e8b70b9bd660233f6b178be69bb7d8",
"size": "2005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/core/dist_zip.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "59"
},
{
"name": "Python",
"bytes": "51117"
},
{
"name": "Ruby",
"bytes": "356022"
},
{
"name": "Shell",
"bytes": "890"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Aisle, Brand, Bundle, Item, Page, VirtualShopPage.

    Auto-generated migration; the recorded field definitions (including the
    ``b'...'`` byte-string literals as emitted by the generator) are part of
    the migration state and must not be hand-edited.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Aisle: a named, sortable grouping of items
        migrations.CreateModel(
            name='Aisle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text=b'Display name for the aisle.', max_length=60, unique=True)),
                ('sort_name', models.CharField(help_text=b'Name the aisle is sorted on. Not displayed to the user.', max_length=60, verbose_name=b'Order')),
                ('description', models.TextField(blank=True, null=True)),
                ('active', models.BooleanField(help_text=b"Determines whether the Aisle is active to the user. This doesn't affect the active status of items.")),
            ],
            options={
                'ordering': ['sort_name'],
            },
        ),
        migrations.CreateModel(
            name='Brand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60, unique=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Bundle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name=b'Internal name')),
            ],
        ),
        # Item: the main catalogue entry; FKs to Aisle/Brand/Bundle below
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text=b'Display name for the item.', max_length=60)),
                ('sort_name', models.CharField(help_text=b'Name the item is sorted on. Not displayed to the user.', max_length=60, verbose_name=b'Sort No.')),
                ('order_name', models.CharField(help_text=b'Used in the order form.', max_length=60)),
                ('description', models.TextField(blank=True, null=True)),
                ('active', models.BooleanField()),
                ('organic', models.BooleanField()),
                ('date_created', models.DateField(auto_now_add=True)),
                ('date_updated', models.DateField(auto_now=True)),
                ('new_changed', models.BooleanField(verbose_name=b'New/Changed')),
                ('unit_number', models.PositiveIntegerField(help_text=b'How many units make up this item?', verbose_name=b'Unit')),
                ('measure_per_unit', models.FloatField(blank=True, null=True)),
                ('measure_type', models.CharField(blank=True, max_length=10, null=True)),
                ('price', models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True)),
                ('price_change', models.CharField(choices=[(b'increase', b'Increase'), (b'no_change', b'No change'), (b'decrease', b'Decrease')], default=b'no_change', max_length=30, null=True)),
                ('picking_order', models.IntegerField(choices=[(1, b'1'), (2, b'2'), (3, b'3'), (4, b'4'), (5, b'5'), (6, b'6'), (7, b'7'), (8, b'8'), (9, b'9'), (10, b'10'), (11, b'11'), (12, b'12'), (13, b'13'), (14, b'14'), (15, b'15'), (16, b'16'), (17, b'17'), (18, b'18'), (19, b'19'), (20, b'20')], default=9, verbose_name=b'Picking Order')),
                ('aisle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalogue.Aisle')),
                ('brand', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='catalogue.Brand')),
                ('bundle', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='catalogue.Bundle')),
            ],
            options={
                'ordering': ['sort_name'],
            },
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True, verbose_name=b'Internal name')),
                ('title', models.CharField(max_length=60, verbose_name=b'Page title')),
                ('body', models.TextField()),
            ],
        ),
        # VirtualShopPage uses multi-table inheritance from Page
        migrations.CreateModel(
            name='VirtualShopPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='catalogue.Page')),
                ('shopPdf', models.FileField(upload_to=b'files', verbose_name=b'Shop PDF file')),
            ],
            bases=('catalogue.page',),
        ),
        # M2M added after both Bundle and Item exist
        migrations.AddField(
            model_name='bundle',
            name='items',
            field=models.ManyToManyField(related_name='bundle_item', to='catalogue.Item'),
        ),
    ]
| {
"content_hash": "031440b022656128efbe87e1607a1c65",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 349,
"avg_line_length": 55.329787234042556,
"alnum_prop": 0.560661411267064,
"repo_name": "brew/fruitynutters",
"id": "3dbc1c9b4e6f9d96b18c5af89ae39aea8496be58",
"size": "5273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapps/fruitynutters/catalogue/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45645"
},
{
"name": "HTML",
"bytes": "26576"
},
{
"name": "JavaScript",
"bytes": "226721"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "67146"
}
],
"symlink_target": ""
} |
import argparse
import json
import logging
import os
import re
import warnings
from carto.auth import APIKeyAuthClient
from carto.datasets import DatasetManager
warnings.filterwarnings('ignore')
# Example invocation:
# python import_from_database.py --connection='{ \
#     "connector": { \
#         "provider": "hive", \
#         "connection": { \
#             "server":"YOUR_SERVER_IP", \
#             "database":"default", \
#             "username":"cloudera", \
#             "password":"cloudera" \
#         },
#         "schema": "default", \
#         "table": "order_items" \
#     }
# }'
# Logger (better than print)
logging.basicConfig(
    level=logging.INFO,
    format=' %(asctime)s - %(levelname)s - %(message)s',
    datefmt='%I:%M:%S %p')
logger = logging.getLogger()
# set input arguments; credentials default to the CARTO_* env variables
parser = argparse.ArgumentParser(
    description='External database connector')
parser.add_argument('--connection', type=str, dest='connection',
                    help='An external database connection JSON object')
parser.add_argument('--organization', type=str, dest='organization',
                    default=os.environ['CARTO_ORG'] if 'CARTO_ORG' in os.environ else '',
                    help='Set the name of the organization' +
                    ' account (defaults to env variable CARTO_ORG)')
parser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',
                    default=os.environ['CARTO_API_URL'] if 'CARTO_API_URL' in os.environ else '',
                    help='Set the base URL. For example:' +
                    ' https://username.carto.com/ ' +
                    '(defaults to env variable CARTO_API_URL)')
parser.add_argument('--api_key', dest='CARTO_API_KEY',
                    default=os.environ['CARTO_API_KEY'] if 'CARTO_API_KEY' in os.environ else '',
                    help='Api key of the account' +
                    ' (defaults to env variable CARTO_API_KEY)')
args = parser.parse_args()
# Set authentification to CARTO; abort when any credential is missing
if args.CARTO_BASE_URL and args.CARTO_API_KEY and args.organization:
    auth_client = APIKeyAuthClient(
        args.CARTO_BASE_URL, args.CARTO_API_KEY, args.organization)
else:
    logger.error('You need to provide valid credentials, run with -h parameter for details')
    import sys
    sys.exit(1)
# get username from base_url
# NOTE(review): the dots in this pattern are unescaped (match any char),
# and ``username`` is never used below -- confirm whether this block is
# still needed.
substring = re.search('https://(.+?).carto.com', args.CARTO_BASE_URL)
if substring:
    username = substring.group(1)
# Dataset manager: create a synced dataset from the connection JSON
dataset_manager = DatasetManager(auth_client)
connection = json.loads(args.connection.replace("\\", ""))
logger.info(connection)
table = dataset_manager.create(None, None, connection=connection)
logger.info(
    'Table imported: {table}'.format(table=table.name))
| {
"content_hash": "785f200fdfac00b376d46f9965a79c16",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 97,
"avg_line_length": 32.95061728395062,
"alnum_prop": 0.6346946421880855,
"repo_name": "CartoDB/carto-python",
"id": "6a4dcfca3790a59d7c29ba47c514ad1d062f7b56",
"size": "2669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/import_from_database.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148350"
}
],
"symlink_target": ""
} |
import factory
from factory import fuzzy
from mock import patch
from django.utils.timezone import now
from shopify.product.models import Product
from shopify.product.models import Transaction
from shopify.webhook.models import Webhook
class ProductFactory(factory.django.DjangoModelFactory):
    """Factory producing ``Product`` instances with randomized fields."""
    class Meta:
        model = Product
    # fuzzy declarations draw a fresh random value for each instance
    product_id = fuzzy.FuzzyInteger(100000, 999999)
    product_type = fuzzy.FuzzyChoice(['Deposit', 'Fee', 'Purchase'])
    description = fuzzy.FuzzyText(length=64)
    account_number = fuzzy.FuzzyInteger(1000000, 9999999)
class TransactionFactory(factory.django.DjangoModelFactory):
    """Factory producing ``Transaction`` instances with randomized fields."""
    class Meta:
        model = Transaction
    product = factory.SubFactory(ProductFactory)
    amount = fuzzy.FuzzyFloat(1.00, 100.00)
    is_credit = True
    order_id = fuzzy.FuzzyInteger(1000000, 9999999)
    order_name = fuzzy.FuzzyText(length=8)
    item_id = fuzzy.FuzzyInteger(100000, 999999)
    # BUG FIX: ``created_at = now()`` was evaluated once at import time,
    # freezing the same timestamp into every instance; LazyFunction defers
    # the call until each instance is created.
    created_at = factory.LazyFunction(now)
class WebhookFactory(factory.django.DjangoModelFactory):
    """Factory producing ``Webhook`` instances without real HTTP calls."""
    class Meta:
        model = Webhook

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        # Stub out requests.post so model creation (which presumably
        # registers the webhook remotely -- see the model) never hits the
        # network; the fake response reports success with webhook id 12345.
        with patch('requests.post') as mock:
            mock.return_value.status_code = 200
            mock.return_value.raise_for_status.return_value = None
            # NOTE(review): these two bare calls only exercise the mock and
            # have no effect -- they look like leftovers.
            mock.return_value.raise_for_status()
            mock.return_value.json.return_value = {'webhook': {'id': 12345}}
            mock.return_value.json()
            return super(WebhookFactory, cls)._create(target_class, *args, **kwargs)
| {
"content_hash": "11b4d1a1a16b356bfa93e4ed5e8e0ba0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 84,
"avg_line_length": 32.51063829787234,
"alnum_prop": 0.6976439790575916,
"repo_name": "CorbanU/corban-shopify",
"id": "fc7cbd2900f88739c76bf00a9f4daccbb7f83e4b",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopify/webhook/tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "435"
},
{
"name": "Python",
"bytes": "49574"
}
],
"symlink_target": ""
} |
import argparse
from neutronclient._i18n import _
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.neutron.v2_0.vpn import utils as vpn_utils
def add_common_args(parser, is_create=True):
    """Register the IPsec-policy options shared by create and update.

    When ``is_create`` is False no defaults are supplied
    (``argparse.SUPPRESS``), so unspecified options are simply absent
    from the parsed namespace.
    """
    lower = utils.convert_to_lowercase
    no_default = argparse.SUPPRESS
    parser.add_argument(
        '--auth-algorithm',
        default='sha1' if is_create else no_default,
        type=lower,
        choices=['sha1', 'sha256', 'sha384', 'sha512'],
        help=_('Authentication algorithm for IPsec policy, default:sha1.'))
    parser.add_argument(
        '--description',
        help=_('Description of the IPsec policy.'))
    parser.add_argument(
        '--encapsulation-mode',
        default='tunnel' if is_create else no_default,
        choices=['tunnel', 'transport'],
        type=lower,
        help=_('Encapsulation mode for IPsec policy, default:tunnel.'))
    parser.add_argument(
        '--encryption-algorithm',
        default='aes-128' if is_create else no_default,
        type=lower,
        help=_('Encryption algorithm for IPsec policy, default:aes-128.'))
    parser.add_argument(
        '--lifetime',
        metavar="units=UNITS,value=VALUE",
        type=utils.str2dict_type(optional_keys=['units', 'value']),
        help=vpn_utils.lifetime_help("IPsec"))
    parser.add_argument(
        '--pfs',
        default='group5' if is_create else no_default,
        type=lower,
        help=_('Perfect Forward Secrecy for IPsec policy, default:group5.'))
    parser.add_argument(
        '--transform-protocol',
        default='esp' if is_create else no_default,
        type=lower,
        choices=['esp', 'ah', 'ah-esp'],
        help=_('Transform protocol for IPsec policy, default:esp.'))
def parse_common_args2body(parsed_args, body):
    """Copy the shared IPsec-policy attributes from parsed args into body."""
    shared_attrs = ['auth_algorithm', 'encryption_algorithm',
                    'encapsulation_mode', 'transform_protocol',
                    'pfs', 'name', 'description', 'tenant_id']
    neutronv20.update_dict(parsed_args, body, shared_attrs)
    lifetime = parsed_args.lifetime
    if lifetime:
        # validate units/value before trusting the user-supplied dict
        vpn_utils.validate_lifetime_dict(lifetime)
        body['lifetime'] = lifetime
    return body
class ListIPsecPolicy(neutronv20.ListCommand):
    """List IPsec policies that belong to a given tenant connection."""
    resource = 'ipsecpolicy'
    # columns shown by default in the CLI table output
    list_columns = ['id', 'name', 'auth_algorithm',
                    'encryption_algorithm', 'pfs']
    _formatters = {}
    # allow server-side pagination and sorting of results
    pagination_support = True
    sorting_support = True
class ShowIPsecPolicy(neutronv20.ShowCommand):
    """Show information of a given IPsec policy."""
    resource = 'ipsecpolicy'
    # human-readable resource name used in help/error messages
    help_resource = 'IPsec policy'
class CreateIPsecPolicy(neutronv20.CreateCommand):
    """Create an IPsec policy."""
    resource = 'ipsecpolicy'
    help_resource = 'IPsec policy'

    def add_known_arguments(self, parser):
        # the policy name is the only required positional argument
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Name of the IPsec policy.'))
        add_common_args(parser)

    def args2body(self, parsed_args):
        body = parse_common_args2body(parsed_args, body={})
        return {'ipsecpolicy': body}
class UpdateIPsecPolicy(neutronv20.UpdateCommand):
    """Update a given IPsec policy."""
    resource = 'ipsecpolicy'
    help_resource = 'IPsec policy'

    def add_known_arguments(self, parser):
        # on update the name is an optional flag, not a positional
        parser.add_argument(
            '--name',
            help=_('Updated name of the IPsec policy.'))
        add_common_args(parser, is_create=False)

    def args2body(self, parsed_args):
        body = parse_common_args2body(parsed_args, body={})
        return {'ipsecpolicy': body}
class DeleteIPsecPolicy(neutronv20.DeleteCommand):
    """Delete a given IPsec policy."""
    resource = 'ipsecpolicy'
    # human-readable resource name used in help/error messages
    help_resource = 'IPsec policy'
| {
"content_hash": "b765a8370372cfa5819a224df88a495d",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 34.48672566371681,
"alnum_prop": 0.6412625096227867,
"repo_name": "openstack/python-neutronclient",
"id": "ea70155b7e3bb1a8b73dd3589ccdc8f64bb2ebc0",
"size": "4566",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutronclient/neutron/v2_0/vpn/ipsecpolicy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1756257"
},
{
"name": "Shell",
"bytes": "10126"
}
],
"symlink_target": ""
} |
"""Top-level presubmit script for checkxmlstyle.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
# Tell depot_tools' presubmit machinery to run this script under Python 3.
USE_PYTHON3 = True
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook for `git cl upload`; delegates to _CommonChecks."""
    return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook for commit/land; delegates to _CommonChecks."""
    return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
result = []
result.extend(
input_api.canned_checks.RunUnitTests(input_api,
output_api,
['./checkxmlstyle_test.py'],
run_on_python2=False,
run_on_python3=True))
return result
| {
"content_hash": "a78a6bfeec817215f891a6fa84fd4217",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.5910780669144982,
"repo_name": "nwjs/chromium.src",
"id": "f4c3abaa9238f4bb6cf42616ae71251ffbb4f873",
"size": "948",
"binary": false,
"copies": "8",
"ref": "refs/heads/nw70",
"path": "tools/android/checkxmlstyle/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Unit tests for gauge"""
from collections import namedtuple
import random
import re
import shutil
import tempfile
import threading
import time
import os
import unittest
from unittest import mock
from http.server import HTTPServer, BaseHTTPRequestHandler
import yaml
import requests
from requests.exceptions import ConnectionError, ReadTimeout
from ryu.controller.ofp_event import EventOFPMsgBase
from ryu.lib import type_desc
from ryu.lib import hub
from ryu.ofproto import ofproto_v1_3 as ofproto
from ryu.ofproto import ofproto_v1_3_parser as parser
from prometheus_client import CollectorRegistry
from faucet import gauge, gauge_prom, gauge_influx, gauge_pollers, watcher
class QuietHandler(BaseHTTPRequestHandler):
    """Don't log requests."""
    def log_message(self, _format, *_args):
        # suppress BaseHTTPRequestHandler's default stderr logging
        pass
def table_by_id(i):
    """Return a mock table whose ``name`` property is ``table<i>``."""
    mock_table = mock.Mock()
    # .name cannot be set via Mock(name=...), so attach a PropertyMock
    type(mock_table).name = mock.PropertyMock(return_value='table' + str(i))
    return mock_table
def create_mock_datapath(num_ports):
    """Mock a datapath by creating mocked datapath ports."""
    ports = {}
    for port_no in range(1, num_ports + 1):
        mock_port = mock.Mock()
        # .name must go through a PropertyMock (Mock reserves name=)
        type(mock_port).name = mock.PropertyMock(
            return_value='port' + str(port_no))
        ports[port_no] = mock_port
    datapath = mock.Mock(ports=ports, dp_id=random.randint(1, 5000))
    datapath.table_by_id = table_by_id
    type(datapath).name = mock.PropertyMock(return_value='datapath')
    return datapath
def start_server(handler):
    """Start an HTTPServer on an ephemeral port in a daemon thread."""
    httpd = HTTPServer(('', 0), handler)
    serve_thread = threading.Thread(target=httpd.serve_forever, daemon=True)
    serve_thread.start()
    return httpd
def port_state_msg(datapath, port_num, reason, status=0):
    """ Create an OFPPortStatus message with random values. """
    def rand():
        return random.randint(1, 10000)
    # six trailing counter fields are random; hw_addr encodes the port num
    port = parser.OFPPort(port_num,
                          '00:00:00:d0:00:0' + str(port_num),
                          datapath.ports[port_num].name,
                          0,
                          status,
                          rand(), rand(), rand(), rand(), rand(), rand())
    return parser.OFPPortStatus(datapath, reason, port)
def port_stats_msg(datapath):
    """ Create an OFPPortStatsReply with random values. """
    stats = []
    # all ports share one timestamp
    sec = random.randint(1, 10000)
    nsec = random.randint(0, 10000)
    for port_num in datapath.ports:
        # first 4 counters are in [1, 10000], remaining 8 in [0, 10000]
        counters = [random.randint(1, 10000) for _ in range(4)]
        counters += [random.randint(0, 10000) for _ in range(8)]
        stats.append(parser.OFPPortStats(port_num, *(counters + [sec, nsec])))
    return parser.OFPPortStatsReply(datapath, body=stats)
def flow_stats_msg(datapath, instructions):
    """ Create an OFPFlowStatsReply with random values. """
    matches = generate_all_matches()
    # positional OFPFlowStats fields, in declaration order
    rand_fields = [random.randint(0, 9),
                   random.randint(1, 10000),
                   random.randint(0, 10000),
                   random.randint(1, 10000),
                   random.randint(1, 10000),
                   random.randint(1, 10000),
                   0,
                   random.randint(1, 10000),
                   random.randint(1, 10000),
                   random.randint(1, 10000)]
    flow_stats = parser.OFPFlowStats(*(rand_fields + [matches, instructions]))
    return parser.OFPFlowStatsReply(datapath, body=[flow_stats])
def generate_all_matches():
    """
    Generate all OpenFlow Extensible Matches (oxm) and return a single
    OFPMatch containing them, each set to the largest value its data type
    allows (e.g. 15 for a 4-bit int).
    """
    matches = {}
    for oxm_type in ofproto.oxm_types:
        field_type = oxm_type.type
        if field_type == type_desc.MacAddr:
            largest = 'ff:ff:ff:ff:ff:ff'
        elif field_type == type_desc.IPv4Addr:
            largest = '255.255.255.255'
        elif field_type == type_desc.IPv6Addr:
            largest = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
        elif isinstance(field_type, type_desc.IntDescr):
            largest = 2**field_type.size - 1
        else:
            # unsupported descriptor types are skipped
            continue
        matches[oxm_type.name] = largest
    return parser.OFPMatch(**matches)
def logger_to_ofp(port_stats):
    """ Translates between the logger stat name and the OpenFlow stat name"""
    translation = (('packets_out', 'tx_packets'),
                   ('packets_in', 'rx_packets'),
                   ('bytes_out', 'tx_bytes'),
                   ('bytes_in', 'rx_bytes'),
                   ('dropped_out', 'tx_dropped'),
                   ('dropped_in', 'rx_dropped'),
                   ('errors_in', 'rx_errors'))
    return {logger_name: getattr(port_stats, ofp_name)
            for logger_name, ofp_name in translation}
def get_matches(match_dict):
    """Create a set of match name and value tuples"""
    matches = set()
    for entry in match_dict:
        oxm = entry['OXMTlv']
        matches.add((oxm['field'], oxm['value']))
    return matches
def check_instructions(original_inst, logger_inst, test):
    """
    Check that the first original instruction matches the
    first instruction reported by the logger
    """
    original = original_inst[0]
    for inst_type, attrs in logger_inst[0].items():
        # the dict key is the instruction class name
        test.assertEqual(original.__class__.__name__, inst_type)
        for attr_name, attr_val in attrs.items():
            test.assertEqual(getattr(original, attr_name), attr_val)
def compare_flow_msg(flow_msg, flow_dict, test):
    """
    Compare the body section of an OFPFlowStatsReply
    message to a dict representation of it
    """
    flow = flow_msg.body[0]
    for stat_name, stat_val in flow_dict.items():
        if stat_name == 'match':
            # matches compare as sets of (field, value) pairs
            expected = get_matches(stat_val['OFPMatch']['oxm_fields'])
            test.assertEqual(expected, set(flow.match.items()))
        elif stat_name == 'instructions':
            check_instructions(flow.instructions, stat_val, test)
        else:
            test.assertEqual(getattr(flow, stat_name), stat_val)
class PretendInflux(QuietHandler):
    """An HTTP Handler that receives InfluxDB messages."""

    def do_POST(self):  # pylint: disable=invalid-name
        """Write the request body to the server's output file, if one is set."""
        if hasattr(self.server, 'output_file'):
            length = int(self.headers['content-length'])
            body = self.rfile.read(length).decode('utf-8')
            with open(self.server.output_file, 'w') as log:
                log.write(body)
        # InfluxDB's write endpoint replies 204 No Content on success
        self.send_response(204)
        self.end_headers()
class GaugePrometheusTests(unittest.TestCase): # pytype: disable=module-attr
    """Tests the GaugePortStatsPrometheusPoller update method"""
    # Shared across tests; uses its own registry so the process-wide
    # prometheus default registry is not polluted.
    prom_client = gauge_prom.GaugePrometheusClient(reg=CollectorRegistry())
    def parse_prom_output(self, output):
        """Parses the port stats from prometheus into a dictionary.

        Returns a dict keyed by (dp_id, port_name) tuples whose values are
        lists of (stat_name, stat_value) pairs.
        """
        parsed_output = {}
        for line in output.split('\n'):
            # discard comments and stats not related to port stats
            if line.startswith('#') or not line.startswith(gauge_prom.PROM_PORT_PREFIX):
                continue
            index = line.find('{')
            #get the stat name e.g. of_port_rx_bytes and strip 'of_port_'
            prefix = gauge_prom.PROM_PORT_PREFIX + gauge_prom.PROM_PREFIX_DELIM
            stat_name = line[0:index].replace(prefix, '')
            #get the labels within {}
            labels = line[index + 1:line.find('}')].split(',')
            for label in labels:
                lab_name, lab_val = label.split('=')
                lab_val = lab_val.replace('"', '')
                if lab_name == 'dp_id':
                    # dp_id labels are exported as hex strings
                    dp_id = int(lab_val, 16)
                elif lab_name == 'port_name':
                    port_name = lab_val
            # NOTE(review): assumes every sample line carries both a dp_id
            # and a port_name label; otherwise dp_id/port_name would be
            # stale or unbound here — confirm against gauge_prom's output.
            key = (dp_id, port_name)
            stat_val = line.split(' ')[1]
            if key not in parsed_output:
                parsed_output[key] = []
            parsed_output[key].append((stat_name, float(stat_val)))
        return parsed_output
    def get_prometheus_stats(self, addr, port):
        """Attempts to contact the prometheus server
        at the address to grab port stats."""
        url = 'http://{}:{}'.format(addr, port)
        session = requests.Session()
        # retry: the exporter thread may not be listening immediately
        adapter = requests.adapters.HTTPAdapter(max_retries=10)
        session.mount('http://', adapter)
        return session.get(url).text
    def test_poller(self):
        """Test the update method to see if it pushes port stats"""
        datapath = create_mock_datapath(2)
        conf = mock.Mock(dp=datapath,
                         type='',
                         interval=1,
                         prometheus_port=9303,
                         prometheus_addr='localhost',
                         use_test_thread=True
                         )
        prom_poller = gauge_prom.GaugePortStatsPrometheusPoller(conf, '__name__', self.prom_client)
        msg = port_stats_msg(datapath)
        prom_poller.update(time.time(), datapath.dp_id, msg)
        prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port)
        prom_lines = self.parse_prom_output(prom_lines)
        for port_num, port in datapath.ports.items():
            # msg.body entries are ordered by (1-based) port number
            port_stats = msg.body[int(port_num) - 1]
            stats = prom_lines[(datapath.dp_id, port.name)]
            stats_found = set()
            for stat_name, stat_val in stats:
                self.assertAlmostEqual(stat_val, getattr(port_stats, stat_name))
                stats_found.add(stat_name)
            # every exported variable must have been seen exactly
            self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_VARS))
    def test_port_state(self):
        """Test the update method to see if it pushes port state"""
        datapath = create_mock_datapath(2)
        conf = mock.Mock(dp=datapath,
                         type='',
                         interval=1,
                         prometheus_port=9303,
                         prometheus_addr='localhost',
                         use_test_thread=True
                         )
        prom_poller = gauge_prom.GaugePortStatePrometheusPoller(conf, '__name__', self.prom_client)
        reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY]
        for i in range(1, len(conf.dp.ports) + 1):
            msg = port_state_msg(conf.dp, i, reasons[i-1])
            port_name = conf.dp.ports[i].name
            rcv_time = int(time.time())
            prom_poller.update(rcv_time, conf.dp.dp_id, msg)
            prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port)
            prom_lines = self.parse_prom_output(prom_lines)
            stats = prom_lines[(datapath.dp_id, port_name)]
            stats_found = set()
            for stat_name, stat_val in stats:
                # 'reason' lives on the message itself; other vars on msg.desc
                msg_data = msg if stat_name == 'reason' else msg.desc
                self.assertAlmostEqual(stat_val, getattr(msg_data, stat_name))
                stats_found.add(stat_name)
            self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_STATE_VARS))
    def test_flow_stats(self):
        """Check the update method of the GaugeFlowTablePrometheusPoller class"""
        datapath = create_mock_datapath(2)
        conf = mock.Mock(dp=datapath,
                         type='',
                         interval=1,
                         prometheus_port=9303,
                         prometheus_addr='localhost',
                         use_test_thread=True
                         )
        prom_poller = gauge_prom.GaugeFlowTablePrometheusPoller(conf, '__name__', self.prom_client)
        rcv_time = int(time.time())
        instructions = [parser.OFPInstructionGotoTable(1)]
        msg = flow_stats_msg(conf.dp, instructions)
        # smoke test only: update() must not raise
        prom_poller.update(rcv_time, conf.dp.dp_id, msg)
class GaugeInfluxShipperTest(unittest.TestCase): # pytype: disable=module-attr
    """Tests the InfluxShipper"""
    def create_config_obj(self, port=12345):
        """Create a mock config object that contains the necessary InfluxDB config"""
        conf = mock.Mock(influx_host='localhost',
                         influx_port=port,
                         influx_user='gauge',
                         influx_pwd='',
                         influx_db='gauge',
                         influx_timeout=10
                         )
        return conf
    def get_values(self, dict_to_unpack):
        """Get all the values from a nested dictionary (depth-first)"""
        values = []
        for value in dict_to_unpack.values():
            if isinstance(value, dict):
                # recurse into nested dicts and flatten their values
                values.extend(self.get_values(value))
            else:
                values.append(value)
        return values
    def test_ship_success(self):
        """Checks that the shipper successfully connects
        to a HTTP server when the points are shipped"""
        try:
            server = start_server(PretendInflux)
            shipper = gauge_influx.InfluxShipper()
            shipper.conf = self.create_config_obj(server.server_port)
            points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
            shipper.ship_points(points)
        except (ConnectionError, ReadTimeout) as err:
            self.fail("Code threw an exception: {}".format(err))
        finally:
            # always tear the fake InfluxDB server down
            server.socket.close()
            server.shutdown()
    def test_ship_connection_err(self):
        """Checks that even when there is a connection error,
        there is no exception thrown"""
        try:
            shipper = gauge_influx.InfluxShipper()
            # default port 12345 has no server behind it
            shipper.conf = self.create_config_obj()
            shipper.logger = mock.Mock()
            points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
            shipper.ship_points(points)
        except (ConnectionError, ReadTimeout) as err:
            self.fail("Code threw an exception: {}".format(err))
    def test_ship_no_config(self):
        """Check that no exceptions are thrown when
        there is no config"""
        try:
            shipper = gauge_influx.InfluxShipper()
            points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},]
            shipper.ship_points(points)
        except (ConnectionError, ReadTimeout) as err:
            self.fail("Code threw an exception: {}".format(err))
    def test_point(self):
        """Checks that the points produced still have the variables given to it"""
        shipper = gauge_influx.InfluxShipper()
        dp_name = 'faucet-1'
        port_name = 'port1.0.1'
        rcv_time = int(time.time())
        stat_name = 'test_stat_name'
        #max uint64 number
        stat_val = 2**64 - 1
        port_point = shipper.make_port_point(dp_name, port_name, rcv_time, stat_name, stat_val)
        # everything except the stat value is a known input; whatever is
        # left over after removing the inputs must be the stat value itself
        values = {dp_name, port_name, rcv_time, stat_name}
        port_vals = set(self.get_values(port_point))
        port_vals_stat = port_vals.difference(values)
        self.assertEqual(len(port_vals_stat), 1)
        self.assertAlmostEqual(port_vals_stat.pop(), stat_val)
        tags = {'dp_name': dp_name, 'port_name': port_name}
        point = shipper.make_point(tags, rcv_time, stat_name, stat_val)
        point_vals = set(self.get_values(point))
        point_vals_stat = point_vals.difference(values)
        self.assertEqual(len(point_vals_stat), 1)
        self.assertAlmostEqual(point_vals_stat.pop(), stat_val)
class GaugeInfluxUpdateTest(unittest.TestCase): # pytype: disable=module-attr
    """Test the Influx loggers update methods"""
    server = None
    def setUp(self):
        """ Starts up an HTTP server to mock InfluxDB.
        Also opens a new temp file for the server to write to """
        self.server = start_server(PretendInflux)
        self.temp_fd, self.server.output_file = tempfile.mkstemp()
    def tearDown(self):
        """ Close the temp file (which should delete it)
        and stop the HTTP server """
        os.close(self.temp_fd)
        os.remove(self.server.output_file)
        self.server.socket.close()
        self.server.shutdown()
    def create_config_obj(self, datapath):
        """Create a mock config object that contains the necessary InfluxDB config"""
        conf = mock.Mock(influx_host='localhost',
                         influx_port=self.server.server_port,
                         influx_user='gauge',
                         influx_pwd='',
                         influx_db='gauge',
                         influx_timeout=10,
                         interval=5,
                         dp=datapath
                         )
        return conf
    @staticmethod
    def parse_key_value(dictionary, kv_list):
        """
        When given a list consisting of strings such as: 'key1=val1',
        add to the dictionary as dictionary['key1'] = 'val1'.
        Ignore entries in the list which do not contain '='
        """
        for key_val in kv_list:
            if '=' in key_val:
                key, val = key_val.split('=')
                try:
                    # NOTE(review): int(float(x)) does not raise for plain
                    # decimals, so every numeric string is truncated to an
                    # int here; only non-numeric strings are kept as-is.
                    # Confirm the truncation is intended.
                    val = float(val)
                    val = int(val)
                except ValueError:
                    pass
                dictionary[key] = val
    def parse_influx_output(self, output_to_parse):
        """
        Parse the output from the mock InfluxDB server
        The usual layout of the output is:
        measurement,tag1=val1,tag2=val2 field1=val3 timestamp
        The tags are separated with a comma and the fields
        are separated with a space. The measurement always
        appears first, and the timestamp is always last
        """
        influx_data = dict()
        tags = output_to_parse.split(',')
        # the last comma-separated chunk contains "lasttag field... timestamp"
        fields = tags[-1].split(' ')
        tags[-1] = fields[0]
        influx_data['timestamp'] = int(fields[-1])
        fields = fields[1:-1]
        self.parse_key_value(influx_data, tags)
        self.parse_key_value(influx_data, fields)
        # tags[0] is the measurement name
        return (tags[0], influx_data)
    def test_port_state(self):
        """ Check the update method of the GaugePortStateInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(3))
        db_logger = gauge_influx.GaugePortStateInfluxDBLogger(conf, '__name__', mock.Mock())
        reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY]
        for i in range(1, len(conf.dp.ports) + 1):
            msg = port_state_msg(conf.dp, i, reasons[i-1])
            rcv_time = int(time.time())
            db_logger.update(rcv_time, conf.dp.dp_id, msg)
            # the mock InfluxDB server wrote the shipped point to a file
            with open(self.server.output_file, 'r') as log:
                output = log.read()
            influx_data = self.parse_influx_output(output)[1]
            data = {conf.dp.name, conf.dp.ports[i].name, rcv_time, reasons[i-1]}
            self.assertEqual(data, set(influx_data.values()))
    def test_port_stats(self):
        """Check the update method of the GaugePortStatsInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(2))
        db_logger = gauge_influx.GaugePortStatsInfluxDBLogger(conf, '__name__', mock.Mock())
        msg = port_stats_msg(conf.dp)
        rcv_time = int(time.time())
        db_logger.update(rcv_time, conf.dp.dp_id, msg)
        with open(self.server.output_file, 'r') as log:
            output = log.readlines()
        for line in output:
            measurement, influx_data = self.parse_influx_output(line)
            # get the number at the end of the port_name
            port_num = int(influx_data['port_name'][-1]) # pytype: disable=unsupported-operands
            # get the original port stat value
            port_stat_val = logger_to_ofp(
                msg.body[port_num - 1])[measurement] # pytype: disable=unsupported-operands
            self.assertEqual(port_stat_val, influx_data['value'])
            self.assertEqual(conf.dp.name, influx_data['dp_name'])
            self.assertEqual(rcv_time, influx_data['timestamp'])
    def test_flow_stats(self):
        """Check the update method of the GaugeFlowTableInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(0))
        db_logger = gauge_influx.GaugeFlowTableInfluxDBLogger(conf, '__name__', mock.Mock())
        rcv_time = int(time.time())
        instructions = [parser.OFPInstructionGotoTable(1)]
        msg = flow_stats_msg(conf.dp, instructions)
        db_logger.update(rcv_time, conf.dp.dp_id, msg)
        # fields the logger derives from the flow entry rather than copying
        other_fields = {'dp_name': conf.dp.name,
                        'dp_id': hex(conf.dp.dp_id),
                        'timestamp': rcv_time,
                        'priority': msg.body[0].priority,
                        'table_id': msg.body[0].table_id,
                        'inst_count': len(msg.body[0].instructions),
                        'vlan': msg.body[0].match.get('vlan_vid') ^ ofproto.OFPVID_PRESENT,
                        'cookie': msg.body[0].cookie,
                        }
        with open(self.server.output_file, 'r') as log:
            output = log.readlines()
        for line in output:
            measurement, influx_data = self.parse_influx_output(line)
            for stat_name, stat_val in influx_data.items():
                if stat_name == 'value':
                    # the 'value' field depends on which measurement this is
                    if measurement == 'flow_packet_count':
                        self.assertEqual(msg.body[0].packet_count, stat_val)
                    elif measurement == 'flow_byte_count':
                        self.assertEqual(msg.body[0].byte_count, stat_val)
                    else:
                        self.fail("Unknown measurement")
                elif stat_name in other_fields:
                    self.assertEqual(other_fields[stat_name], stat_val)
                elif stat_name in msg.body[0].match:
                    self.assertEqual(msg.body[0].match.get(stat_name), stat_val)
                else:
                    self.fail("Unknown key: {} and value: {}".format(stat_name, stat_val))
class GaugeThreadPollerTest(unittest.TestCase): # pytype: disable=module-attr
    """Tests the methods in the GaugeThreadPoller class"""
    def setUp(self):
        """Creates a gauge poller and initialises class variables"""
        self.interval = 1
        conf = mock.Mock(interval=self.interval)
        self.poller = gauge_pollers.GaugeThreadPoller(conf, '__name__', mock.Mock())
        self.send_called = False
    def fake_send_req(self):
        """This should be called instead of the send_req method in the
        GaugeThreadPoller class, which just throws an error"""
        # record that the poller loop actually invoked send_req
        self.send_called = True
    def fake_no_response(self):
        """This should be called instead of the no_response method in the
        GaugeThreadPoller class, which just throws an error"""
        pass
    def test_start(self):
        """ Checks if the poller is started """
        self.poller.send_req = self.fake_send_req
        self.poller.no_response = self.fake_no_response
        self.poller.start(mock.Mock(), active=True)
        poller_thread = self.poller.thread
        # sleep past one polling interval so send_req has had time to fire
        hub.sleep(self.interval + 1)
        self.assertTrue(self.send_called)
        self.assertFalse(poller_thread.dead)
    def test_stop(self):
        """ Check if a poller can be stopped """
        self.poller.send_req = self.fake_send_req
        self.poller.no_response = self.fake_no_response
        self.poller.start(mock.Mock(), active=True)
        poller_thread = self.poller.thread
        # stop before the first interval elapses: send_req must never run
        self.poller.stop()
        hub.sleep(self.interval + 1)
        self.assertFalse(self.send_called)
        self.assertTrue(poller_thread.dead)
    def test_active(self):
        """Check if active reflects the state of the poller """
        # initial state: neither active nor running
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())
        self.poller.start(mock.Mock(), active=True)
        self.assertTrue(self.poller.is_active())
        self.assertTrue(self.poller.running())
        self.poller.stop()
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())
        # a poller started inactive runs its thread but is not "active"
        self.poller.start(mock.Mock(), active=False)
        self.assertFalse(self.poller.is_active())
        self.assertTrue(self.poller.running())
        self.poller.stop()
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())
class GaugePollerTest(unittest.TestCase): # pytype: disable=module-attr
    """Checks the send_req and no_response methods in a Gauge Poller"""

    def check_send_req(self, poller, msg_class):
        """Verify every request the poller hands to the datapath is an
        instance of the expected OpenFlow message class."""
        datapath = mock.Mock(ofproto=ofproto, ofproto_parser=parser)
        poller.start(datapath, active=True)
        poller.stop()
        poller.send_req()
        for recorded_call in datapath.mock_calls:
            sent_msg = recorded_call[1][0]
            self.assertTrue(isinstance(sent_msg, msg_class))

    def check_no_response(self, poller):
        """Verify that calling no_response raises nothing."""
        try:
            poller.no_response()
        except Exception as err:
            self.fail("Code threw an exception: {}".format(err))
class GaugePortStatsPollerTest(GaugePollerTest):
    """Checks the GaugePortStatsPoller class"""

    def test_send_req(self):
        """Check that the poller sends a port stats request"""
        poller = gauge_pollers.GaugePortStatsPoller(
            mock.Mock(interval=1), '__name__', mock.Mock())
        self.check_send_req(poller, parser.OFPPortStatsRequest)

    def test_no_response(self):
        """Check that the poller doesnt throw an exception"""
        poller = gauge_pollers.GaugePortStatsPoller(
            mock.Mock(), '__name__', mock.Mock())
        self.check_no_response(poller)
class GaugeFlowTablePollerTest(GaugePollerTest):
    """Checks the GaugeFlowTablePoller class"""

    def test_send_req(self):
        """Check that the poller sends a flow stats request"""
        poller = gauge_pollers.GaugeFlowTablePoller(
            mock.Mock(interval=1), '__name__', mock.Mock())
        self.check_send_req(poller, parser.OFPFlowStatsRequest)

    def test_no_response(self):
        """Check that the poller doesnt throw an exception"""
        poller = gauge_pollers.GaugeFlowTablePoller(
            mock.Mock(), '__name__', mock.Mock())
        self.check_no_response(poller)
class GaugeWatcherTest(unittest.TestCase): # pytype: disable=module-attr
    """Checks the loggers in watcher.py."""
    conf = None
    temp_fd = None
    temp_path = None
    def setUp(self):
        """Creates a temporary file and a mocked conf object"""
        self.temp_fd, self.temp_path = tempfile.mkstemp()
        self.conf = mock.Mock(file=self.temp_path, compress=False)
    def tearDown(self):
        """Closes and deletes the temporary file"""
        os.close(self.temp_fd)
        os.remove(self.temp_path)
    def get_file_contents(self):
        """Return the contents of the temporary file and clear it"""
        with open(self.temp_path, 'r+') as file_:
            contents = file_.read()
            # truncate so the next logger write starts from an empty file
            file_.seek(0, 0)
            file_.truncate()
        return contents
    def test_port_state(self):
        """Check the update method in the GaugePortStateLogger class"""
        logger = watcher.GaugePortStateLogger(self.conf, '__name__', mock.Mock())
        # map the human-readable reason word expected in the log to the
        # OFP reason code used to build the message
        reasons = {'unknown' : 5,
                   'add' : ofproto.OFPPR_ADD,
                   'delete' : ofproto.OFPPR_DELETE,
                   'up' : ofproto.OFPPR_MODIFY,
                   'down' : ofproto.OFPPR_MODIFY
                   }
        #add an ofproto attribute to the datapath
        datapath = create_mock_datapath(1)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)
        for reason in reasons:
            state = 0
            if reason == 'down':
                # 'down' is distinguished from 'up' by the link-down state bit
                state = ofproto.OFPPS_LINK_DOWN
            msg = port_state_msg(datapath, 1, reasons[reason], state)
            logger.update(time.time(), datapath.dp_id, msg)
            log_str = self.get_file_contents().lower()
            self.assertTrue(reason in log_str)
            self.assertTrue(msg.desc.name in log_str or 'port ' + str(msg.desc.port_no) in log_str)
            # the dp_id may be logged either as hex or as a decimal string
            hexs = re.findall(r'0x[0-9A-Fa-f]+', log_str)
            hexs = [int(num, 16) for num in hexs]
            self.assertTrue(datapath.dp_id in hexs or str(datapath.dp_id) in log_str)
    def test_port_stats(self):
        """Check the update method in the GaugePortStatsLogger class"""
        #add an ofproto attribute to the datapath
        datapath = create_mock_datapath(2)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)
        #add the datapath as an attribute to the config
        dp_attr = {'dp' : datapath}
        self.conf.configure_mock(**dp_attr)
        logger = watcher.GaugePortStatsLogger(self.conf, '__name__', mock.Mock())
        msg = port_stats_msg(datapath)
        original_stats = []
        for i in range(0, len(msg.body)):
            original_stats.append(logger_to_ofp(msg.body[i]))
        logger.update(time.time(), datapath.dp_id, msg)
        log_str = self.get_file_contents()
        for stat_name in original_stats[0]:
            stat_name = stat_name.split("_")
            #grab any lines that mention the stat_name
            pattern = r'^.*{}.{}.*$'.format(stat_name[0], stat_name[1])
            stats_list = re.findall(pattern, log_str, re.MULTILINE)
            for line in stats_list:
                self.assertTrue(datapath.name in line)
                #grab the port number (only works for single digit port nums)
                index = line.find('port')
                port_num = int(line[index + 4])
                # grab the number at the end of the line
                last_n = re.search(r'(\d+)$', line)
                assert last_n
                val = int(last_n.group())
                logger_stat_name = '_'.join((stat_name[0], stat_name[1]))
                original_val = original_stats[port_num - 1][logger_stat_name]
                self.assertEqual(original_val, val)
    def test_flow_stats(self):
        """Check the update method in the GaugeFlowStatsLogger class"""
        #add an ofproto attribute to the datapath
        datapath = create_mock_datapath(0)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)
        #add the datapath as an attribute to the config
        dp_attr = {'dp' : datapath}
        self.conf.configure_mock(**dp_attr)
        logger = watcher.GaugeFlowTableLogger(self.conf, '__name__', mock.Mock())
        instructions = [parser.OFPInstructionGotoTable(1)]
        msg = flow_stats_msg(datapath, instructions)
        logger.update(time.time(), datapath.dp_id, msg)
        log_str = self.get_file_contents()
        # safe_load: the logged flow table is plain data so no object
        # construction is needed, and yaml.load without an explicit Loader
        # is deprecated (and a TypeError in PyYAML >= 6).
        yaml_dict = yaml.safe_load(log_str)['msg']['OFPFlowStatsReply']['body'][0]['OFPFlowStats']
        compare_flow_msg(msg, yaml_dict, self)
class RyuAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr
    """Smoke tests: Gauge ryu app startup, event handling and config reload."""
    def setUp(self):
        # silence Gauge's log output for the duration of the tests
        os.environ['GAUGE_LOG'] = '/dev/null'
        os.environ['GAUGE_EXCEPTION_LOG'] = '/dev/null'
    @staticmethod
    def _fake_dp():
        """Return a minimal stand-in for a ryu datapath (id 0, no-op close)."""
        datapath = namedtuple('datapath', ['id', 'close'])(0, lambda: None)
        return datapath
    def _fake_event(self):
        """Return a minimal OFP event wrapping a fake datapath."""
        datapath = self._fake_dp()
        msg = namedtuple('msg', ['datapath'])(datapath)
        event = EventOFPMsgBase(msg=msg)
        event.dp = msg.datapath
        return event
    def test_gauge(self):
        """Test Gauge can be initialized."""
        os.environ['GAUGE_CONFIG'] = '/dev/null'
        ryu_app = gauge.Gauge(
            dpset={},
            reg=CollectorRegistry())
        ryu_app.reload_config(None)
        self.assertFalse(ryu_app._config_files_changed())
        # exercise the event handlers with fake events; none should raise
        ryu_app._update_watcher(None, self._fake_event())
        ryu_app._start_watchers(self._fake_dp(), {})
        for event_handler in (
                ryu_app._datapath_connect,
                ryu_app._datapath_disconnect):
            event_handler(self._fake_event())
    def test_gauge_config(self):
        """Test Gauge minimal config."""
        tmpdir = tempfile.mkdtemp()
        os.environ['FAUCET_CONFIG'] = os.path.join(tmpdir, 'faucet.yaml')
        os.environ['GAUGE_CONFIG'] = os.path.join(tmpdir, 'gauge.yaml')
        with open(os.environ['FAUCET_CONFIG'], 'w') as faucet_config:
            faucet_config.write(
                """
vlans:
    100:
        description: "100"
dps:
    dp1:
        dp_id: 0x1
        interfaces:
            1:
                description: "1"
                native_vlan: 100
""")
        os.environ['GAUGE_CONFIG'] = os.path.join(tmpdir, 'gauge.yaml')
        with open(os.environ['GAUGE_CONFIG'], 'w') as gauge_config:
            gauge_config.write(
                """
faucet_configs:
    - '%s'
watchers:
    port_status_poller:
        type: 'port_state'
        all_dps: True
        db: 'prometheus'
    port_stats_poller:
        type: 'port_stats'
        all_dps: True
        interval: 10
        db: 'prometheus'
    flow_table_poller:
        type: 'flow_table'
        all_dps: True
        interval: 60
        db: 'prometheus'
dbs:
    prometheus:
        type: 'prometheus'
        prometheus_addr: '0.0.0.0'
        prometheus_port: 0
""" % os.environ['FAUCET_CONFIG'])
        ryu_app = gauge.Gauge(
            dpset={},
            reg=CollectorRegistry())
        ryu_app.reload_config(None)
        self.assertTrue(ryu_app.watchers)
        # reloading an unchanged config must keep the watchers intact
        ryu_app.reload_config(None)
        self.assertTrue(ryu_app.watchers)
        shutil.rmtree(tmpdir)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main() # pytype: disable=module-attr
| {
"content_hash": "7372835c54b884d1ec497aa3ca39cedf",
"timestamp": "",
"source": "github",
"line_count": 936,
"max_line_length": 99,
"avg_line_length": 37.542735042735046,
"alnum_prop": 0.5710586226522482,
"repo_name": "trentindav/faucet",
"id": "4879c8852e39e00e4d8f0fdc74cbfb2c58235bb8",
"size": "35140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/gauge/test_gauge.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2538"
},
{
"name": "Python",
"bytes": "1126434"
},
{
"name": "Shell",
"bytes": "19845"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the previous cameras migration.
    dependencies = [
        ('cameras', '0002_auto_20151202_1520'),
    ]
    # Make Camera.network an optional one-to-one link to apps.NetworkInfo.
    operations = [
        migrations.AlterField(
            model_name='camera',
            name='network',
            field=models.OneToOneField(null=True, blank=True, to='apps.NetworkInfo'),
        ),
    ]
| {
"content_hash": "63073d8427a4e85e13f5a52d02278fa7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 85,
"avg_line_length": 23,
"alnum_prop": 0.606280193236715,
"repo_name": "theSoloBrewer/Project_Management",
"id": "a583d9846d42b2c0c486790fcdf97abe926339d6",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/cameras/migrations/0003_auto_20151202_1524.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "46287"
},
{
"name": "HTML",
"bytes": "55481"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "53727"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
} |
from typing import Optional
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.schedules.schedule import Schedule
from ray.rllib.utils.typing import TensorType
tf1, tf, tfv = try_import_tf()
@PublicAPI
class ConstantSchedule(Schedule):
    """A Schedule that always yields the same fixed value."""

    def __init__(self, value: float, framework: Optional[str] = None):
        """Creates a ConstantSchedule instance.

        Args:
            value: The value to return at every timestep, regardless of t.
            framework: The framework descriptor string, e.g. "tf",
                "torch", or None.
        """
        super().__init__(framework=framework)
        self._v = value

    @override(Schedule)
    def _value(self, t: TensorType) -> TensorType:
        # Time is irrelevant: the schedule is flat.
        return self._v

    @override(Schedule)
    def _tf_value_op(self, t: TensorType) -> TensorType:
        # Wrap the constant in a tf op for graph-mode usage.
        return tf.constant(self._v)
| {
"content_hash": "e446654933c49f6c27aa35495d8e5b4d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 31.1875,
"alnum_prop": 0.6643286573146293,
"repo_name": "ray-project/ray",
"id": "9681c51670404a401150db7cd546e8ca777587d1",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/utils/schedules/constant_schedule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
# Package name marker.
name = "fortiosapi"
# Re-export the public API at package level.
from .fortiosapi import FortiOSAPI
from .exceptions import (InvalidLicense, NotLogged)
| {
"content_hash": "55be4089d810a3569690128f7282b6b5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 51,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.8130841121495327,
"repo_name": "thomnico/fortigateconf",
"id": "6f8309a756549d4494c858d920b66c0b5535c671",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fortiosapi/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15880"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import socket
import json
__all__ = ['Query', 'Socket']
class Query(object):
    """Builds and executes a single MK Livestatus GET query."""

    def __init__(self, conn, resource):
        self._conn = conn
        self._resource = resource
        self._columns = []
        self._filters = []

    def call(self):
        """Encode the query (bytes on Python 3, str on Python 2) and
        send it over the connection."""
        try:
            payload = bytes(str(self), 'utf-8')
        except TypeError:
            # Python 2: bytes() takes no encoding argument.
            payload = str(self)
        return self._conn.call(payload)

    __call__ = call

    def __str__(self):
        # Assemble the Livestatus request line by line.
        lines = ['GET %s' % (self._resource)]
        if self._columns and any(self._columns):
            lines.append('Columns: %s' % (' '.join(self._columns)))
        lines.extend('Filter: %s' % (flt) for flt in self._filters)
        return '\n'.join(lines) + '\nOutputFormat: json\nColumnHeaders: on\n'

    def columns(self, *args):
        """Select the columns to return; chainable."""
        self._columns = args
        return self

    def filter(self, filter_str):
        """Append a Filter: clause; chainable."""
        self._filters.append(filter_str)
        return self
class Socket(object):
    """A connection to an MK Livestatus endpoint.

    `peer` is either an (address, port) tuple (TCP) or a single-element
    sequence holding a unix socket path.
    """

    def __init__(self, peer):
        self.peer = peer

    def __getattr__(self, name):
        # Attribute access builds a Query for the resource of that name,
        # e.g. Socket(...).hosts -> Query for 'GET hosts'.
        return Query(self, name)

    def call(self, request):
        """Send `request` and return the response rows as a list of dicts.

        Returns [] when the server sends no data.
        """
        # Create the socket before entering try/finally: if creation
        # itself fails there is nothing to close, and the previous
        # `finally: s.close()` would have raised UnboundLocalError,
        # masking the original error.
        if len(self.peer) == 2:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.connect(self.peer)
            # sendall: send() may transmit only part of the request.
            s.sendall(request)
            # Half-close so the server sees EOF and sends its response.
            s.shutdown(socket.SHUT_WR)
            rawdata = s.makefile().read()
            if not rawdata:
                return []
            data = json.loads(rawdata)
            # First row is the column headers; zip them onto each data row.
            return [dict(zip(data[0], value)) for value in data[1:]]
        finally:
            s.close()
| {
"content_hash": "9951999b4bc662e33af2b1e75780f4ba",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 69,
"avg_line_length": 26.44776119402985,
"alnum_prop": 0.5208803611738149,
"repo_name": "frjaraur/python-mk-livestatus",
"id": "25ccdf9353d92ccd8003b862e807b23a0f8e2aa1",
"size": "1819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mk_livestatus/livestatus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4397"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the previous service_builder migration.
    dependencies = [
        ('service_builder', '0015_auto_20180520_1112'),
    ]
    # Add a flag marking whether the service runs through Apache Livy.
    operations = [
        migrations.AddField(
            model_name='service',
            name='through_livy',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "3805b28d74b401910f569ed9a5d30e69",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 21.88888888888889,
"alnum_prop": 0.6040609137055838,
"repo_name": "dipapaspyros/bdo_platform",
"id": "7cc7625d9f1a161eec29bf38b9445e36c93313aa",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "service_builder/migrations/0016_service_through_livy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176900"
},
{
"name": "HTML",
"bytes": "69066"
},
{
"name": "JavaScript",
"bytes": "10644123"
},
{
"name": "Python",
"bytes": "195457"
},
{
"name": "XSLT",
"bytes": "1521"
}
],
"symlink_target": ""
} |
import os
import redis
import points_tracker.app
from rq import Worker, Queue, Connection
# Queues this worker consumes.
listen = ['audio_player']
# Fall back to the local docker-machine redis instance when REDIS_URL is unset.
conn = redis.from_url(os.getenv('REDIS_URL', 'redis://192.168.99.100:32770'))
if __name__ == '__main__':
    # Run an RQ worker processing jobs from every queue in `listen`.
    with Connection(conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()
| {
"content_hash": "e8e0dad3a3e7c592a3327a3719760e46",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.654320987654321,
"repo_name": "paynejacob21/points-tracker",
"id": "0ec16bc8dfcc2a5e7632cb2dafdfee535c09e4ac",
"size": "324",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "150513"
},
{
"name": "CoffeeScript",
"bytes": "1656"
},
{
"name": "HTML",
"bytes": "18633"
},
{
"name": "JavaScript",
"bytes": "4071269"
},
{
"name": "Makefile",
"bytes": "3906"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "43306"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Builds on the previous gk_collections_person migration.
    dependencies = [
        ('gk_collections_person', '0002_remove_personcreator_name_full'),
    ]
    # Drop the 'life_info_' prefix from the place fields and remove the
    # separate display/EDTF date fields from PersonCreator.
    operations = [
        migrations.RenameField(
            model_name='personcreator',
            old_name='life_info_birth_place',
            new_name='birth_place',
        ),
        migrations.RenameField(
            model_name='personcreator',
            old_name='life_info_birth_place_historic',
            new_name='birth_place_historic',
        ),
        migrations.RenameField(
            model_name='personcreator',
            old_name='life_info_death_place',
            new_name='death_place',
        ),
        migrations.RemoveField(
            model_name='personcreator',
            name='life_info_birth_date_display',
        ),
        migrations.RemoveField(
            model_name='personcreator',
            name='life_info_birth_date_edtf',
        ),
        migrations.RemoveField(
            model_name='personcreator',
            name='life_info_death_date_display',
        ),
        migrations.RemoveField(
            model_name='personcreator',
            name='life_info_death_date_edtf',
        ),
    ]
| {
"content_hash": "c498996a56821bd5df33aefd26805a38",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 29.227272727272727,
"alnum_prop": 0.5590979782270606,
"repo_name": "ic-labs/django-icekit",
"id": "1d591e90ce4e79712ff6bd67875df965954d3f84",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "glamkit_collections/contrib/work_creator/plugins/person/migrations/0003_auto_20170606_1158.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
def hello(name):
    'this statement is for doc string example'
    # Python 2 print statement; prints the label and the argument.
    print "Function body", name
# Access the docstring attribute (value is discarded; demonstration only).
hello.__doc__
# Call the function using a keyword argument.
hello(name = 'started in Python')
"content_hash": "dd663a97cbb10f3c9acb7e48572e25a1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 43,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7071428571428572,
"repo_name": "IPVL/Tanvin-PythonWorks",
"id": "c85944842182af3fb4210c2c39c026beda1b50dc",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter6/codes/funcIntro.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23097"
}
],
"symlink_target": ""
} |
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
# Public estimators exported by this module.
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor",
           "RandomTreesEmbedding"]
# Upper bound (exclusive) for per-tree integer seeds drawn from a
# numpy RandomState.
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Draw one bootstrap sample of row indices.

    Helper for ``_parallel_build_trees``: returns ``n_samples`` indices
    drawn uniformly with replacement from ``range(n_samples)``.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Return the out-of-bag indices for one bootstrap draw.

    Helper for ``forest._set_oob_score``: replays the bootstrap sample
    produced by ``random_state`` and returns the indices it never drew.
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    draw_counts = bincount(drawn, minlength=n_samples)
    return np.arange(n_samples)[draw_counts == 0]
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Private function used to fit a single tree in parallel."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if forest.bootstrap:
        n_samples = X.shape[0]
        # Bootstrapping is expressed through sample weights: rows drawn
        # k times get their weight multiplied by k (0 for unsampled rows),
        # instead of materializing a resampled copy of X.
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            curr_sample_weight = sample_weight.copy()
        indices = _generate_sample_indices(tree.random_state, n_samples)
        sample_counts = bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts
        if class_weight == 'subsample':
            # 'subsample' is the deprecated alias of 'balanced_subsample';
            # silence the 'auto' deprecation it triggers internally.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', DeprecationWarning)
                curr_sample_weight *= compute_sample_weight('auto', y, indices)
        elif class_weight == 'balanced_subsample':
            # Class weights recomputed on the bootstrap sample of this tree.
            curr_sample_weight *= compute_sample_weight('balanced', y, indices)
        tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
    else:
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
    return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        # Ensemble-level settings; tree-level hyper-parameters are
        # forwarded to the sub-estimators via ``estimator_params``.
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: the Cython tree code releases the GIL.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)
        # Transpose from [n_estimators, n_samples] to [n_samples, n_estimators].
        return np.array(results).T
    def decision_path(self, X):
        """Return the decision path in the forest
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        n_nodes_ptr : array of size (n_estimators + 1, )
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                              backend="threading")(
            delayed(parallel_helper)(tree, 'decision_path', X,
                                     check_input=False)
            for tree in self.estimators_)
        # Cumulative column offsets mapping each tree's nodes into the
        # horizontally stacked indicator matrix.
        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()
        return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, accept_sparse="csc", dtype=DTYPE)
        y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Fold class weights into the per-sample weights.
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = []
            for i in range(n_more_estimators):
                # append=False: trees are collected only after fitting succeeds.
                tree = self._make_estimator(append=False)
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)
            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""
    def _validate_y_class_weight(self, y):
        # Default implementation
        return y, None
    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        # Delegate to the first tree: all trees share the same input checks.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before `feature_importances_`.")
        # Mean of the per-tree importances.
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)
        return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]
        oob_decision_function = []
        oob_score = 0.0
        predictions = []
        # One probability accumulator per output.
        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))
        # Each tree votes only on the samples it did NOT see during fitting.
        for estimator in self.estimators_:
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]
            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]
        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")
            # Normalize accumulated votes into a probability distribution.
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)
        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score / self.n_outputs_
    def _validate_y_class_weight(self, y):
        # Returns (y remapped to contiguous integer codes per output,
        # expanded per-sample class weights or None).
        check_classification_targets(y)
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices
        if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample". Given "%s".'
                                     % self.class_weight)
                if self.class_weight == "subsample":
                    # NOTE(review): adjacent string literals here are missing
                    # separating spaces ("0.17 andwill", "balancedstrategy").
                    warn("class_weight='subsample' is deprecated in 0.17 and"
                         "will be removed in 0.19. It was replaced by "
                         "class_weight='balanced_subsample' using the balanced"
                         "strategy.", DeprecationWarning)
                if self.warm_start:
                    warn('class_weight presets "balanced" or "balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight("balanced", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')
            # The *_subsample presets with bootstrap are expanded per-tree in
            # _parallel_build_trees instead of once here.
            if (self.class_weight not in ['subsample', 'balanced_subsample'] or
                    not self.bootstrap):
                if self.class_weight == 'subsample':
                    class_weight = 'auto'
                elif self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                with warnings.catch_warnings():
                    if class_weight == "auto":
                        warnings.simplefilter('ignore', DeprecationWarning)
                    expanded_class_weight = compute_sample_weight(class_weight,
                                                                  y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))
            # Map the argmax class codes back to the original labels,
            # output by output.
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(parallel_helper)(e, 'predict_proba', X,
                                     check_input=False)
            for e in self.estimators_)
        # Reduce
        proba = all_proba[0]
        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]
            proba /= len(self.estimators_)
        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]
            # NOTE(review): divides by self.n_estimators here but by
            # len(self.estimators_) in the single-output branch above;
            # these differ only mid-warm-start — confirm intended.
            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators
        return proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.estimators_)
        # Reduce: plain mean of the per-tree predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_samples = y.shape[0]
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        # Each tree predicts only the samples it did NOT see during fitting.
        for estimator in self.estimators_:
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict(
                X[unsampled_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[unsampled_indices, :] += p_estimator
            n_predictions[unsampled_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples never left out.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        self.oob_score_ = 0.0
        # Average R^2 across outputs.
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    Fits ``n_estimators`` decision-tree classifiers on sub-samples of the
    dataset and averages their probabilistic predictions to improve
    accuracy and control over-fitting.  When ``bootstrap=True`` (the
    default) each tree sees a bootstrap sample of the same size as the
    original input, drawn with replacement.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        Split quality measure: "gini" (Gini impurity) or "entropy"
        (information gain).  Tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        Features considered per split: an int count, a float fraction of
        ``n_features``, "auto"/"sqrt" (``sqrt(n_features)``), "log2"
        (``log2(n_features)``), or None (all features).  The search never
        stops before at least one valid partition is found, even if that
        requires inspecting more than ``max_features`` features.
    max_depth : integer or None, optional (default=None)
        Maximum tree depth; None expands nodes until leaves are pure or
        hold fewer than ``min_samples_split`` samples.  Ignored when
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        Minimum samples to split an internal node: an int count, or a
        float fraction (``ceil(min_samples_split * n_samples)``).
    min_samples_leaf : int, float, optional (default=1)
        Minimum samples at a leaf: an int count, or a float fraction
        (``ceil(min_samples_leaf * n_samples)``).
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first (by impurity reduction) with at most this
        many leaves; when not None, ``max_depth`` is ignored.
    min_impurity_split : float, optional (default=1e-7)
        Early-stopping threshold: a node splits only if its impurity
        exceeds this value.
        .. versionadded:: 0.18
    bootstrap : boolean, optional (default=True)
        Whether trees are built on bootstrap samples.
    oob_score : bool (default=False)
        Whether to estimate generalization accuracy on out-of-bag samples.
    n_jobs : integer, optional (default=1)
        Parallel jobs for both `fit` and `predict`; -1 uses all cores.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None for the global ``np.random`` state.
    verbose : int, optional (default=0)
        Verbosity of the tree-building process.
    warm_start : bool, optional (default=False)
        When True, reuse the trees from the previous call to ``fit`` and
        add more estimators; otherwise fit a whole new forest.
    class_weight : dict, list of dicts, "balanced",
        "balanced_subsample" or None, optional (default=None)
        Weights in the form ``{class_label: weight}`` (a list of dicts for
        multi-output).  "balanced" uses
        ``n_samples / (n_classes * np.bincount(y))``; "balanced_subsample"
        recomputes those weights on each tree's bootstrap sample.
        Multiplied with ``sample_weight`` when that is passed to ``fit``.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        Class labels (one array per output for multi-output).
    n_classes_ : int or list
        Number of classes (per output for multi-output).
    n_features_ : int
        Number of features seen during ``fit``.
    n_outputs_ : int
        Number of outputs seen during ``fit``.
    feature_importances_ : array of shape = [n_features]
        Impurity-based feature importances.
    oob_score_ : float
        Training-set score from the out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Out-of-bag decision function; may contain NaN for samples that
        were never left out when ``n_estimators`` is small.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "min_impurity_split",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree-construction hyper-parameters, copied onto every
        # sub-estimator through ``estimator_params`` above.
        self.criterion = criterion
        self.max_features = max_features
        self.max_depth = max_depth
        self.max_leaf_nodes = max_leaf_nodes
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Forward ensemble-level options to the ForestRegressor base class.
        # ``estimator_params`` names the constructor arguments that are copied
        # onto each DecisionTreeRegressor sub-estimator at fit time.
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "min_impurity_split",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Store the tree-level hyperparameters verbatim: scikit-learn requires
        # ``__init__`` to do nothing but record its arguments so that
        # get_params/set_params/clone work by signature introspection.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. versionadded:: 0.18

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # Forward ensemble-level options to the ForestClassifier base class.
        # ``estimator_params`` names the constructor arguments that are copied
        # onto each ExtraTreeClassifier sub-estimator at fit time.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "min_impurity_split",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)

        # Store the tree-level hyperparameters verbatim; scikit-learn requires
        # ``__init__`` to only record its arguments (clone/get_params rely on
        # attribute names matching the signature).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. Supported criteria
        are "mse" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. versionadded:: 0.18

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate the R^2 on unseen data.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features.

    n_outputs_ : int
        The number of outputs.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Forward ensemble-level options to the ForestRegressor base class.
        # ``estimator_params`` names the constructor arguments that are copied
        # onto each ExtraTreeRegressor sub-estimator at fit time.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "min_impurity_split",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Store the tree-level hyperparameters verbatim, as required by the
        # scikit-learn estimator contract (clone/get_params introspection).
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.

    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.

    Read more in the :ref:`User Guide <random_trees_embedding>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.

    max_depth : integer, optional (default=5)
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` is the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` is the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    min_impurity_split : float, optional (default=1e-7)
        Threshold for early stopping in tree growth. A node will split
        if its impurity is above the threshold, otherwise it is a leaf.

        .. versionadded:: 0.18

    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F.  "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """
    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 min_impurity_split=1e-7,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Bootstrap and OOB scoring are permanently disabled: this forest is
        # unsupervised, so out-of-bag estimates are meaningless here.
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes", "min_impurity_split",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # 'mse' and max_features=1 are fixed (not user-tunable): each split
        # examines a single randomly chosen feature, which makes the trees
        # "totally random".
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_split = min_impurity_split
        self.sparse_output = sparse_output

    def _set_oob_score(self, X, y):
        # Explicitly unsupported -- see __init__, oob_score is forced False.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.

        Returns
        -------
        self : object
            Returns self.
        """
        # Delegate to fit_transform and discard the transformed output.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        # Fit the supervised tree machinery against uniform-random targets:
        # the targets carry no signal, so the resulting partition of X is
        # effectively unsupervised ("totally random trees").
        rnd = check_random_state(self.random_state)
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)

        # One-hot encode leaf membership: one indicator column per leaf,
        # one active entry per (sample, tree) pair.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """Transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # Reuse the encoder fitted in fit_transform; raises AttributeError if
        # called before fit.
        return self.one_hot_encoder_.transform(self.apply(X))
| {
"content_hash": "0b05970c8c4d21e53d5e165c71a97748",
"timestamp": "",
"source": "github",
"line_count": 1688,
"max_line_length": 105,
"avg_line_length": 39.617298578199055,
"alnum_prop": 0.6050931602715555,
"repo_name": "sonnyhu/scikit-learn",
"id": "731885a1d414e9b3283260601629f6605065ed67",
"size": "66874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/forest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "399679"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6595059"
},
{
"name": "Shell",
"bytes": "9216"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals, print_function
from django.db import models
class SingletonModel(models.Model):
    """Abstract base class for models that must have exactly one row.

    Every ``save()`` writes to primary key 1, and ``delete()`` is a no-op,
    so at most one record ever exists per concrete subclass.
    """

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Pin the primary key so every save targets the same row.  Use
        # ``pk`` rather than ``id`` so subclasses that declare a custom
        # primary-key field still behave as singletons (backward compatible:
        # ``pk`` is an alias for ``id`` on default models).
        self.pk = 1
        super(SingletonModel, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Deliberately ignore deletion requests to preserve the singleton row.
        pass

    @classmethod
    def only_one(cls):
        """Return the singleton instance.

        Raises ``cls.DoesNotExist`` if the instance has never been saved.
        """
        return cls.objects.get(pk=1)
"content_hash": "c23a4f0181aafd2f2f5dbbc84cea7c11",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 22,
"alnum_prop": 0.6339712918660287,
"repo_name": "chenchiyuan/yajiong",
"id": "80f25d29d9261465252b37203802164f21ce0967",
"size": "470",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "libs/models/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "95453"
},
{
"name": "JavaScript",
"bytes": "2550308"
},
{
"name": "Python",
"bytes": "93476"
},
{
"name": "Shell",
"bytes": "844"
}
],
"symlink_target": ""
} |
"""Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import sys
import shutil
import json
import logging
import socket
import requests
import datetime
import hashlib
from collections import defaultdict
from celery import task, Task
from djcelery import celery as celery_app
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from slumber.exceptions import HttpClientError
from docker import errors as docker_errors
from readthedocs.builds.constants import (LATEST, BUILD_STATE_TRIGGERED,
BUILD_STATE_CLONING,
BUILD_STATE_INSTALLING,
BUILD_STATE_BUILDING,
BUILD_STATE_FINISHED)
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import send_email, run_on_app_servers
from readthedocs.cdn.purge import purge
from readthedocs.doc_builder.loader import get_builder_class
from readthedocs.doc_builder.base import restoring_chdir
from readthedocs.doc_builder.environments import (LocalEnvironment,
DockerEnvironment)
from readthedocs.doc_builder.exceptions import (BuildEnvironmentError,
BuildEnvironmentWarning)
from readthedocs.projects.exceptions import ProjectImportError
from readthedocs.projects.models import ImportedFile, Project
from readthedocs.projects.utils import make_api_version, make_api_project
from readthedocs.projects.constants import LOG_TEMPLATE
from readthedocs.builds.constants import STABLE
from readthedocs.projects import symlinks
from readthedocs.privacy.loader import Syncer
from readthedocs.search.parse_json import process_all_json_files
from readthedocs.search.utils import process_mkdocs_json
from readthedocs.restapi.utils import index_search_request
from readthedocs.vcs_support import utils as vcs_support_utils
from readthedocs.api.client import api as api_v1
from readthedocs.restapi.client import api as api_v2
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
# Module-level logger for build/task progress and error messages.
log = logging.getLogger(__name__)

# Project slugs restricted to HTML-only output (no PDF/ePub builds);
# configured via the optional HTML_ONLY_PROJECTS setting.
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
class UpdateDocsTask(Task):
"""
The main entry point for updating documentation.
It handles all of the logic around whether a project is imported or we
created it. Then it will build the html docs and other requested parts.
`pk`
Primary key of the project to update
`record`
Whether or not to keep a record of the update in the database. Useful
for preventing changes visible to the end-user when running commands
from the shell, for example.
"""
max_retries = 5
default_retry_delay = (7 * 60)
name = 'update_docs'
def __init__(self, build_env=None, force=False, search=True, localmedia=True,
build=None, project=None, version=None):
self.build_env = build_env
self.build_force = force
self.build_search = search
self.build_localmedia = localmedia
self.build = {}
if build is not None:
self.build = build
self.version = {}
if version is not None:
self.version = version
self.project = {}
if project is not None:
self.project = project
def run(self, pk, version_pk=None, build_pk=None, record=True, docker=False,
search=True, force=False, intersphinx=True, localmedia=True,
basic=False, **kwargs):
env_cls = LocalEnvironment
if docker or settings.DOCKER_ENABLE:
env_cls = DockerEnvironment
self.project = self.get_project(pk)
self.version = self.get_version(self.project, version_pk)
self.build = self.get_build(build_pk)
self.build_search = search
self.build_localmedia = localmedia
self.build_force = force
self.build_env = env_cls(project=self.project, version=self.version,
build=self.build, record=record)
with self.build_env:
if self.project.skip:
raise BuildEnvironmentError(
_('Builds for this project are temporarily disabled'))
try:
self.setup_vcs()
except vcs_support_utils.LockTimeout, e:
self.retry(exc=e, throw=False)
raise BuildEnvironmentError(
'Version locked, retrying in 5 minutes.',
status_code=423
)
if self.project.documentation_type == 'auto':
self.update_documentation_type()
self.setup_environment()
# TODO the build object should have an idea of these states, extend
# the model to include an idea of these outcomes
outcomes = self.build_docs()
build_id = self.build.get('id')
# Web Server Tasks
if build_id:
finish_build.delay(
version_pk=self.version.pk,
build_pk=build_id,
hostname=socket.gethostname(),
html=outcomes['html'],
search=outcomes['search'],
localmedia=outcomes['localmedia'],
pdf=outcomes['pdf'],
epub=outcomes['epub'],
)
if self.build_env.failed:
self.send_notifications()
@staticmethod
def get_project(project_pk):
"""
Get project from API
"""
project_data = api_v1.project(project_pk).get()
project = make_api_project(project_data)
return project
@staticmethod
def get_version(project, version_pk):
"""
Ensure we're using a sane version.
"""
if version_pk:
version_data = api_v1.version(version_pk).get()
else:
version_data = (api_v1
.version(project.slug)
.get(slug=LATEST)['objects'][0])
return make_api_version(version_data)
@staticmethod
def get_build(build_pk):
"""
Retrieve build object from API
:param build_pk: Build primary key
"""
build = {}
if build_pk:
build = api_v2.build(build_pk).get()
return dict((key, val) for (key, val) in build.items()
if key not in ['project', 'version', 'resource_uri',
'absolute_uri'])
def update_documentation_type(self):
"""
Force Sphinx for 'auto' documentation type
This used to determine the type and automatically set the documentation
type to Sphinx for rST and Mkdocs for markdown. It now just forces
Sphinx, due to markdown support.
"""
ret = 'sphinx'
project_data = api_v2.project(self.project.pk).get()
project_data['documentation_type'] = ret
api_v2.project(self.project.pk).put(project_data)
self.project.documentation_type = ret
def setup_vcs(self):
"""
Update the checkout of the repo to make sure it's the latest.
This also syncs versions in the DB.
:param build_env: Build environment
"""
self.build_env.update_build(state=BUILD_STATE_CLONING)
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Updating docs from VCS'))
try:
update_output = update_imported_docs(self.version.pk)
commit = self.project.vcs_repo(self.version.slug).commit
if commit:
self.build['commit'] = commit
except ProjectImportError:
raise BuildEnvironmentError('Failed to import project',
status_code=404)
def setup_environment(self):
"""
Build the virtualenv and install the project into it.
Always build projects with a virtualenv.
:param build_env: Build environment to pass commands and execution through.
"""
build_dir = os.path.join(
self.project.venv_path(version=self.version.slug),
'build')
self.build_env.update_build(state=BUILD_STATE_INSTALLING)
if os.path.exists(build_dir):
log.info(LOG_TEMPLATE
.format(project=self.project.slug,
version=self.version.slug,
msg='Removing existing build directory'))
shutil.rmtree(build_dir)
site_packages = '--no-site-packages'
if self.project.use_system_packages:
site_packages = '--system-site-packages'
self.build_env.run(
self.project.python_interpreter,
'-mvirtualenv',
site_packages,
self.project.venv_path(version=self.version.slug)
)
# Install requirements
wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
requirements = [
'sphinx==1.3.1',
'Pygments==2.0.2',
'virtualenv==13.1.0',
'setuptools==18.0.1',
'docutils==0.11',
'mkdocs==0.14.0',
'mock==1.0.1',
'pillow==2.6.1',
'readthedocs-sphinx-ext==0.5.4',
'sphinx-rtd-theme==0.1.8',
'alabaster>=0.7,<0.8,!=0.7.5',
'recommonmark==0.1.1',
]
cmd = [
'python',
self.project.venv_bin(version=self.version.slug, bin='pip'),
'install',
'--use-wheel',
'--find-links={0}'.format(wheeldir),
'-U',
]
if self.project.use_system_packages:
# Other code expects sphinx-build to be installed inside the
# virtualenv. Using the -I option makes sure it gets installed
# even if it is already installed system-wide (and
# --system-site-packages is used)
cmd.append('-I')
cmd.extend(requirements)
self.build_env.run(
*cmd,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle requirements
requirements_file_path = self.project.requirements_file
checkout_path = self.project.checkout_path(self.version.slug)
if not requirements_file_path:
builder_class = get_builder_class(self.project.documentation_type)
docs_dir = (builder_class(self.build_env)
.docs_dir())
for path in [docs_dir, '']:
for req_file in ['pip_requirements.txt', 'requirements.txt']:
test_path = os.path.join(checkout_path, path, req_file)
if os.path.exists(test_path):
requirements_file_path = test_path
break
if requirements_file_path:
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, bin='pip'),
'install',
'--exists-action=w',
'-r{0}'.format(requirements_file_path),
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
# Handle setup.py
checkout_path = self.project.checkout_path(self.version.slug)
setup_path = os.path.join(checkout_path, 'setup.py')
if os.path.isfile(setup_path) and self.project.use_virtualenv:
if getattr(settings, 'USE_PIP_INSTALL', False):
self.build_env.run(
'python',
self.project.venv_bin(version=self.version.slug, bin='pip'),
'install',
'--ignore-installed',
'.',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
else:
self.build_env.run(
'python',
'setup.py',
'install',
'--force',
cwd=checkout_path,
bin_path=self.project.venv_bin(version=self.version.slug)
)
def build_docs(self):
"""Wrapper to all build functions
Executes the necessary builds for this task and returns whether the
build was successful or not.
:returns: Build outcomes with keys for html, search, localmedia, pdf,
and epub
:rtype: dict
"""
self.build_env.update_build(state=BUILD_STATE_BUILDING)
before_build.send(sender=self.version)
outcomes = defaultdict(lambda: False)
with self.project.repo_nonblockinglock(
version=self.version,
max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
outcomes['html'] = self.build_docs_html()
outcomes['search'] = self.build_docs_search()
outcomes['localmedia'] = self.build_docs_localmedia()
outcomes['pdf'] = self.build_docs_pdf()
outcomes['epub'] = self.build_docs_epub()
after_build.send(sender=self.version)
return outcomes
def build_docs_html(self):
html_builder = get_builder_class(self.project.documentation_type)(
self.build_env
)
if self.build_force:
html_builder.force()
html_builder.append_conf()
success = html_builder.build()
if success:
html_builder.move()
# Gracefully attempt to move files via task on web workers.
try:
move_files.delay(
version_pk=self.version.pk,
html=True,
hostname=socket.gethostname(),
)
except socket.error:
# TODO do something here
pass
return success
def build_docs_search(self):
'''Build search data with separate build'''
if self.build_search:
if self.project.is_type_mkdocs:
return self.build_docs_class('mkdocs_json')
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_search')
return False
def build_docs_localmedia(self):
'''Get local media files with separate build'''
if self.build_localmedia:
if self.project.is_type_sphinx:
return self.build_docs_class('sphinx_singlehtmllocalmedia')
return False
def build_docs_pdf(self):
'''Build PDF docs'''
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_pdf_build):
return False
return self.build_docs_class('sphinx_pdf')
def build_docs_epub(self):
'''Build ePub docs'''
if (self.project.slug in HTML_ONLY or
not self.project.is_type_sphinx or
not self.project.enable_epub_build):
return False
return self.build_docs_class('sphinx_epub')
def build_docs_class(self, builder_class):
"""Build docs with additional doc backends
These steps are not necessarily required for the build to halt, so we
only raise a warning exception here. A hard error will halt the build
process.
"""
builder = get_builder_class(builder_class)(self.build_env)
success = builder.build()
builder.move()
return success
    def send_notifications(self):
        """Queue notifications for this build (used on build failure).

        NOTE(review): inside the body the name ``send_notifications``
        resolves to the module-level Celery task (method names live in the
        class namespace, not the function scope), so this enqueues the
        web-queue task rather than recursing.
        """
        send_notifications.delay(self.version.pk, build_pk=self.build['id'])
# Registered singleton instance of the UpdateDocsTask; other code triggers
# builds through this task object (e.g. update_docs.run(...)).
update_docs = celery_app.tasks[UpdateDocsTask.name]
@task()
def update_imported_docs(version_pk):
    """
    Check out or update the given project's repository.

    Also pushes the repository's tags/branches up to the API so the
    version list stays in sync.

    :param version_pk: primary key of the Version to check out; a falsy
        version falls back to updating the latest revision
    :returns: dict with a ``checkout`` key holding the VCS command result
    """
    version_data = api_v1.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    ret_dict = {}
    # Make sure the project's doc directory exists before any VCS work.
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)
    if not project.vcs_repo():
        raise ProjectImportError(
            "Repo type '{0}' unknown".format(project.repo_type))
    with project.repo_nonblockinglock(
            version=version,
            max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        before_vcs.send(sender=version)
        # Get the actual code on disk
        if version:
            log.info(
                LOG_TEMPLATE.format(
                    project=project.slug,
                    version=version.slug,
                    msg='Checking out version {slug}: {identifier}'.format(
                        slug=version.slug,
                        identifier=version.identifier
                    )
                )
            )
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.checkout(version.identifier)
        else:
            # Does this ever get called?
            log.info(LOG_TEMPLATE.format(
                project=project.slug, version=version.slug,
                msg='Updating to latest revision'))
            version_slug = LATEST
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.update()
        after_vcs.send(sender=version)
        # Update tags/version
        version_post_data = {'repo': version_repo.repo_url}
        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]
        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]
        try:
            api_v2.project(project.pk).sync_versions.post(version_post_data)
        # FIX: was ``except Exception, e`` (removed in Python 3) followed by
        # a bare ``print`` of the deprecated ``e.message`` attribute, which
        # does not exist on every exception. Log with the full traceback.
        except Exception:
            log.exception("Sync Versions Exception")
    return ret_dict
# Web tasks
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
                 localmedia=False, search=False, pdf=False, epub=False):
    """
    Build finished: do housekeeping on the web server.

    Activates the version when HTML was built, clears media for outputs
    that were not produced, pulls artifacts over from the build host,
    refreshes symlinks, and queues metadata/search follow-up tasks.

    :param version_pk: pk of the Version that was built
    :param build_pk: pk of the Build record for this run
    :param hostname: build host to copy artifacts from
    :param html: whether the HTML output was built this run
    :param localmedia: whether local media (htmlzip) was built
    :param search: whether search data was built
    :param pdf: whether the PDF output was built
    :param epub: whether the ePub output was built
    """
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    # A successful HTML build makes the version live.
    if html:
        version.active = True
        version.built = True
        version.save()
    # Remove stale media for outputs that were skipped this run.
    if not pdf:
        clear_pdf_artifacts(version)
    if not epub:
        clear_epub_artifacts(version)
    # Direct call (not .delay): artifacts are copied synchronously on this
    # web worker before the symlinks below are refreshed.
    move_files(
        version_pk=version_pk,
        hostname=hostname,
        html=html,
        localmedia=localmedia,
        search=search,
        pdf=pdf,
        epub=epub,
    )
    symlinks.symlink_cnames(version)
    symlinks.symlink_translations(version)
    symlinks.symlink_subprojects(version)
    if version.project.single_version:
        symlinks.symlink_single_version(version)
    else:
        symlinks.remove_symlink_single_version(version)
    # Delayed tasks
    update_static_metadata.delay(version.project.pk)
    fileify.delay(version.pk, commit=build.commit)
    update_search.delay(version.pk, commit=build.commit)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """Copy built artifacts from the build host into their public paths.

    Each flag selects one output type; only the selected outputs are
    copied via ``Syncer.copy``.
    """
    version = Version.objects.get(pk=version_pk)
    if html:
        from_path = version.project.artifact_path(
            version=version.slug, type=version.project.documentation_type)
        target = version.project.rtd_build_path(version.slug)
        Syncer.copy(from_path, target, host=hostname)
    if 'sphinx' in version.project.documentation_type:
        # (requested flag, artifact type on disk, production media type)
        sphinx_outputs = [
            (localmedia, 'sphinx_localmedia', 'htmlzip'),
            (search, 'sphinx_search', 'json'),
            # Always move PDF's because the return code lies.
            (pdf, 'sphinx_pdf', 'pdf'),
            (epub, 'sphinx_epub', 'epub'),
        ]
        for wanted, artifact_type, media_type in sphinx_outputs:
            if not wanted:
                continue
            from_path = version.project.artifact_path(
                version=version.slug, type=artifact_type)
            to_path = version.project.get_production_media_path(
                type=media_type, version_slug=version.slug,
                include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
    if 'mkdocs' in version.project.documentation_type:
        if search:
            from_path = version.project.artifact_path(
                version=version.slug, type='mkdocs_json')
            to_path = version.project.get_production_media_path(
                type='json', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit):
    """Re-index a version's pages in search for the given commit."""
    version = Version.objects.get(pk=version_pk)
    if version.project.is_type_sphinx:
        page_list = process_all_json_files(version, build_dir=False)
    elif version.project.is_type_mkdocs:
        page_list = process_mkdocs_json(version, build_dir=False)
    else:
        log.error('Unknown documentation type: %s' % version.project.documentation_type)
        return
    paths = ' '.join(page['path'] for page in page_list)
    log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, paths))
    index_search_request(
        version=version,
        page_list=page_list,
        commit=commit,
        project_scale=0,
        page_scale=0,
        # Sections are skipped to speed up indexing; they aren't currently
        # exposed anywhere.
        section=False,
    )
@task(queue='web')
def fileify(version_pk, commit):
    """
    Create ImportedFile objects for all of a version's files.

    This is a prereq for indexing the docs for search.
    It also causes celery-haystack to kick off an index of the file.

    :param version_pk: pk of the Version whose files are indexed
    :param commit: commit identifier of the build; required
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    if not project.cdn_enabled:
        return
    if not commit:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug,
            msg='Imported File not being built because no commit information'))
        # FIX: previously execution fell through here despite the log
        # message, creating ImportedFiles with commit=None and letting
        # _manage_imported_files delete the rows for the real commit.
        return
    path = project.rtd_build_path(version.slug)
    if path:
        log.info(LOG_TEMPLATE.format(
            project=version.project.slug, version=version.slug,
            msg='Creating ImportedFiles'))
        _manage_imported_files(version, path, commit)
    else:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug,
            msg='No ImportedFile files'))
def _manage_imported_files(version, path, commit):
    """Update the ImportedFile objects for this version of the docs.

    Walks the built documentation tree, creating/refreshing an
    ImportedFile row (with an md5 of the content) for every file, then
    drops rows belonging to older commits and purges the cache for
    anything whose content changed.

    :param version: Version instance the files belong to
    :param path: root directory of the built docs on disk
    :param commit: VCS commit identifier the build came from
    """
    changed_files = set()
    for root, __, filenames in os.walk(path):
        for filename in filenames:
            dirpath = os.path.join(root.replace(path, '').lstrip('/'),
                                   filename.lstrip('/'))
            full_path = os.path.join(root, filename)
            # FIX: close the file handle deterministically instead of
            # relying on garbage collection (open(...).read() leaked it).
            with open(full_path, 'rb') as fh:
                md5 = hashlib.md5(fh.read()).hexdigest()
            try:
                obj, __ = ImportedFile.objects.get_or_create(
                    project=version.project,
                    version=version,
                    path=dirpath,
                    name=filename,
                )
            except ImportedFile.MultipleObjectsReturned:
                log.exception('Error creating ImportedFile')
                continue
            if obj.md5 != md5:
                obj.md5 = md5
                changed_files.add(dirpath)
            if obj.commit != commit:
                obj.commit = commit
            obj.save()
    # Delete ImportedFiles from previous versions
    ImportedFile.objects.filter(project=version.project,
                                version=version).exclude(commit=commit).delete()
    # Purge Cache
    purge(changed_files)
@task(queue='web')
def send_notifications(version_pk, build_pk):
    """Deliver webhook and email notifications for a finished build."""
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    # Webhooks first, then email subscribers.
    for hook in version.project.webhook_notifications.all():
        webhook_notification(version, build, hook.url)
    email_qs = version.project.emailhook_notifications.all()
    for email in email_qs.values_list('email', flat=True):
        email_notification(version, build, email)
def email_notification(version, build, email):
    """Email a build-failed notification to a single address."""
    log.debug(LOG_TEMPLATE.format(project=version.project.slug,
                                  version=version.slug,
                                  msg='sending email to: %s' % email))
    domain = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')
    context = {
        'version': version,
        'project': version.project,
        'build': build,
        'build_url': 'https://{0}{1}'.format(
            domain, build.get_absolute_url()),
        'unsub_url': 'https://{0}{1}'.format(
            domain,
            reverse('projects_notifications', args=[version.project.slug])),
    }
    # Prefer the short commit hash in the subject when we have one.
    if build.commit:
        title = _('Failed: {project.name} ({commit})').format(
            commit=build.commit[:8], **context)
    else:
        title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
    send_email(
        email,
        title,
        template='projects/email/build_failed.txt',
        template_html='projects/email/build_failed.html',
        context=context,
    )
def webhook_notification(version, build, hook_url):
    """POST a JSON build-status payload to a configured webhook.

    :param version: Version that was built
    :param build: Build record whose status is reported
    :param hook_url: destination URL for the notification
    """
    project = version.project
    data = json.dumps({
        'name': project.name,
        'slug': project.slug,
        'build': {
            'id': build.id,
            'success': build.success,
            'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
        }
    })
    log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
    try:
        # FIX: bound the request — requests.post without a timeout hangs
        # the worker forever on a dead endpoint.
        requests.post(hook_url, data=data, timeout=30)
    except requests.RequestException:
        # FIX: one broken hook should not abort the remaining
        # notifications for this build; log with traceback and carry on.
        log.exception('Failed to POST webhook notification to %s' % hook_url)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
    """Update static metadata JSON file

    Metadata settings include the following project settings:

    version
        The default version for the project, default: `latest`

    language
        The default language for the project, default: `en`

    languages
        List of languages built by linked translation projects.

    :param project_pk: pk of the Project to write metadata for
    :param path: destination file; defaults to the project's static
        metadata path
    """
    project = Project.objects.get(pk=project_pk)
    if not path:
        path = project.static_metadata_path()
    log.info(LOG_TEMPLATE.format(
        project=project.slug,
        version='',
        msg='Updating static metadata',
    ))
    translations = [trans.language for trans in project.translations.all()]
    languages = set(translations)
    # Convert to JSON safe types
    metadata = {
        'version': project.default_version,
        'language': project.language,
        'languages': list(languages),
        'single_version': project.single_version,
    }
    try:
        # FIX: use a context manager so the handle is closed even when
        # json.dump raises; previously a failure leaked the open file.
        with open(path, 'w+') as fh:
            json.dump(metadata, fh)
        Syncer.copy(path, path, host=socket.gethostname(), file=True)
    except (AttributeError, IOError) as e:
        log.debug(LOG_TEMPLATE.format(
            project=project.slug,
            version='',
            msg='Cannot write to metadata.json: {0}'.format(e)
        ))
def update_docs_pull(record=False, force=False):
    """
    A high-level interface that will update all of the projects.

    This is mainly used from a cronjob or management command.

    :param record: passed through to the update task
    :param force: accepted for backward compatibility; currently unused
    """
    for version in Version.objects.filter(built=True):
        try:
            update_docs.run(pk=version.project.pk, version_pk=version.pk,
                            record=record)
        # FIX: was ``except Exception, e`` — comma syntax is removed in
        # Python 3 and the binding was unused (the traceback is already
        # captured via exc_info).
        except Exception:
            log.error("update_docs_pull failed", exc_info=True)
# Random Tasks
@task()
def remove_dir(path):
    """
    Remove a directory on the build/celery server.

    This is mainly a wrapper around shutil.rmtree so that app servers
    can kill things on the build server.

    :param path: absolute directory to delete; raises OSError if missing
        (shutil.rmtree default behavior).
    """
    log.info("Removing %s" % path)
    shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
    """Remove every build artifact (pdf, epub, htmlzip, html) for a version
    from the web servers."""
    version = Version.objects.get(pk=version_pk)
    # Same order as before: pdf, epub, htmlzip, then the HTML tree.
    for cleaner in (clear_pdf_artifacts, clear_epub_artifacts,
                    clear_htmlzip_artifacts, clear_html_artifacts):
        cleaner(version)
def clear_pdf_artifacts(version):
    """Delete the version's production PDF media from every app server."""
    # NOTE(review): the path is interpolated into a shell command; assumed
    # not user-controlled — confirm upstream sanitization.
    media_path = version.project.get_production_media_path(
        type='pdf', version_slug=version.slug)
    run_on_app_servers('rm -rf %s' % media_path)
def clear_epub_artifacts(version):
    """Delete the version's production ePub media from every app server."""
    # NOTE(review): the path is interpolated into a shell command; assumed
    # not user-controlled — confirm upstream sanitization.
    media_path = version.project.get_production_media_path(
        type='epub', version_slug=version.slug)
    run_on_app_servers('rm -rf %s' % media_path)
def clear_htmlzip_artifacts(version):
    """Delete the version's htmlzip media from every app server."""
    # NOTE(review): the path is interpolated into a shell command; assumed
    # not user-controlled — confirm upstream sanitization.
    media_path = version.project.get_production_media_path(
        type='htmlzip', version_slug=version.slug)
    run_on_app_servers('rm -rf %s' % media_path)
def clear_html_artifacts(version):
    """Delete the version's built HTML tree from every app server."""
    # NOTE(review): the path is interpolated into a shell command; assumed
    # not user-controlled — confirm upstream sanitization.
    build_path = version.project.rtd_build_path(version=version.slug)
    run_on_app_servers('rm -rf %s' % build_path)
| {
"content_hash": "167a6dc34651f00a9fcc1d1ae6990dd3",
"timestamp": "",
"source": "github",
"line_count": 827,
"max_line_length": 126,
"avg_line_length": 36.195888754534465,
"alnum_prop": 0.596311886149529,
"repo_name": "hach-que/readthedocs.org",
"id": "ec397f8d081032bd3039c46e5d5b79bfb39460cf",
"size": "29934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/projects/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "68570"
},
{
"name": "HTML",
"bytes": "213690"
},
{
"name": "JavaScript",
"bytes": "436906"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "800534"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
from pyramid.config import Configurator
def main(global_config, **settings):
    """Build and return the Pyramid WSGI application."""
    config = Configurator(settings=settings)
    config.include('pyramid_chameleon')
    config.add_static_view('static', 'static', cache_max_age=3600)
    # Route table: (route name, URL pattern), registered in order.
    routes = [
        ('home', '/'),
        ('getAnnotation', 'getAnnotation'),
        ('tissueOntology', 'tissueOntology'),
        ('getAnnotationRegistry', 'getAnnotationRegistry'),
    ]
    for route_name, pattern in routes:
        config.add_route(route_name, pattern)
    config.scan()
    return config.make_wsgi_app()
| {
"content_hash": "477fa967fccbea782e3b3607d1053394",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 74,
"avg_line_length": 38,
"alnum_prop": 0.7052631578947368,
"repo_name": "T2DREAM/t2dream-portal",
"id": "d3df60049cf2cd5849fafaa1f78d50abb3d0ee83",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CDE/cde/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    # NOTE: auto-generated plotly validator. The long ``data_docs`` string
    # mirrors the plotly.js schema for ``barpolar.marker``; regenerate from
    # the schema rather than hand-editing it.
    def __init__(self, plotly_name="marker", parent_name="barpolar", **kwargs):
        # Delegate to CompoundValidator with generated defaults; callers may
        # override data_class_str/data_docs through kwargs (pop keeps them
        # out of the **kwargs passthrough).
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.barpolar.marker.Co
                lorBar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            line
                :class:`plotly.graph_objects.barpolar.marker.Li
                ne` instance or dict with compatible properties
            opacity
                Sets the opacity of the bars.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            """,
            ),
            **kwargs
        )
| {
"content_hash": "9124e95634322dd0b83164ad7e91b278",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 48.693693693693696,
"alnum_prop": 0.5469010175763183,
"repo_name": "plotly/python-api",
"id": "3ada20c3c627b484f504b9e933b97c7db6e22dd3",
"size": "5405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/barpolar/_marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
Main plugin file.
"""
from __future__ import absolute_import
import configparser
import requests
import webbrowser
from builtins import object
import os.path
import json
from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from qgis.PyQt.QtWidgets import QAction
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsProject
from ee_plugin import provider
from ee_plugin.icons import resources
# Read the plugin version string from the metadata.txt that ships next to
# this module (standard QGIS plugin layout).
cfg = configparser.ConfigParser()
cfg.read(os.path.join(os.path.dirname(__file__), 'metadata.txt'))
VERSION = cfg.get('general', 'version')
# Module-level guard so the remote version check runs at most once per
# QGIS session (set by GoogleEarthEnginePlugin.check_version).
version_checked = False
class GoogleEarthEnginePlugin(object):
    """QGIS Plugin Implementation."""

    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'GoogleEarthEnginePlugin_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)
        self.menu_name_plugin = self.tr("Google Earth Engine Plugin")
        # Create and register the EE data providers
        provider.register_data_provider()

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString

        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('GoogleEarthEngine', message)

    def initGui(self):
        """Create the menu entries and hook up project-load handling."""
        # Create action that will start plugin configuration
        icon_path = ':/plugins/ee_plugin/icons/earth_engine.svg'
        self.dockable_action = QAction(
            QIcon(icon_path), "User Guide", self.iface.mainWindow())
        # connect the action to the run method
        self.dockable_action.triggered.connect(self.run)
        # Add menu item
        self.iface.addPluginToMenu(self.menu_name_plugin, self.dockable_action)
        # Register signal to initialize EE layers on project load
        self.iface.projectRead.connect(self.updateLayers)

    def run(self):
        """Open the user guide in an external web browser."""
        webbrowser.open_new(
            "http://qgis-ee-plugin.appspot.com/user-guide")

    @staticmethod
    def _parse_version(version_str):
        """Parse a dotted version string into a tuple of ints.

        Non-numeric components become 0 so a malformed server response
        cannot raise here.
        """
        parts = []
        for piece in version_str.strip().split('.'):
            try:
                parts.append(int(piece))
            except ValueError:
                parts.append(0)
        return tuple(parts)

    def check_version(self):
        """Warn (at most once per session) when a newer plugin version exists."""
        global version_checked
        if version_checked:
            return
        try:
            latest_version = requests.get('https://qgis-ee-plugin.appspot.com/get_latest_version').text
            # FIX: the versions were compared as raw strings, which is
            # lexicographic (e.g. '0.0.9' > '0.0.10'); compare numerically.
            if self._parse_version(VERSION) < self._parse_version(latest_version):
                self.iface.messageBar().pushMessage('Earth Engine plugin:',
                                                    'There is a more recent version of the ee_plugin available {0} and you have {1}, please upgrade!'.format(latest_version, VERSION), duration=15)
        except Exception:
            # FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            print('Error occurred when checking for recent plugin version, skipping ...')
        finally:
            version_checked = True

    def unload(self):
        """Remove the plugin menu item and icon."""
        self.iface.removePluginMenu(
            self.menu_name_plugin, self.dockable_action)

    def updateLayers(self):
        """Re-create the EE layers stored in the project being loaded."""
        import ee
        from ee_plugin.utils import add_or_update_ee_layer
        layers = QgsProject.instance().mapLayers().values()
        for l in filter(lambda layer: layer.customProperty('ee-layer'), layers):
            ee_object = l.customProperty('ee-object')
            ee_object_vis = l.customProperty('ee-object-vis')
            # Backward-compatibility check: file formats before 0.0.3 stored
            # ee-objects in an ee-script property and no ee-object-vis; the
            # JSON representation also changed, making older EE JSON
            # unreadable here.
            if ee_object is None:
                print('\nWARNING:\n Map layer saved with older version of EE plugin is detected, backward-compatibility for versions before 0.0.3 is not supported due to changes in EE library, please re-create EE layer by re-running the Python script\n')
                return
            ee_object = ee.deserializer.fromJSON(ee_object)
            if ee_object_vis is not None:
                ee_object_vis = json.loads(ee_object_vis)
            # Update the loaded EE layer, preserving its current name,
            # visibility, and opacity.
            # TODO: this should not be needed, refactor add_or_update_ee_layer
            # to update_ee_layer
            name = l.name()
            shown = QgsProject.instance().layerTreeRoot().findLayer(l.id()).itemVisibilityChecked()
            opacity = l.renderer().opacity()
            add_or_update_ee_layer(ee_object, ee_object_vis, name, shown, opacity)
| {
"content_hash": "d6cc272234fe5da3a93838555a7cc04e",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 254,
"avg_line_length": 36.841059602649004,
"alnum_prop": 0.6474923602372821,
"repo_name": "gena/qgis-earthengine-plugin",
"id": "5fcace398778713b15b869febef3993a87c6e5dd",
"size": "5587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ee_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4708"
},
{
"name": "Makefile",
"bytes": "10908"
},
{
"name": "Python",
"bytes": "38634"
},
{
"name": "QML",
"bytes": "1564"
},
{
"name": "Shell",
"bytes": "2447"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the grassland sharnaff lair."""
    lair = Tangible()
    lair.template = "object/tangible/lair/sharnaff/shared_lair_sharnaff_grassland.iff"
    lair.attribute_template_id = -1
    lair.stfName("lair_n","sharnaff_grassland")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return lair
"content_hash": "ff5a28bfe660c512eae28834f8662caf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.7071651090342679,
"repo_name": "anhstudios/swganh",
"id": "36c7e0751a347e83c35ada891d6886f58d031a22",
"size": "466",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/sharnaff/shared_lair_sharnaff_grassland.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))  # NOTE(dstanek): path for our
                                           # Sphinx extension
# NOTE(dstanek): adds _ to the builtins so keystone modules can be imported
# at doc-build time without configuring gettext; `_` becomes a plain str
# passthrough.
__builtins__['_'] = str
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode',
              'oslosphinx',
              # NOTE(dstanek): Uncomment the [pbr] section in setup.cfg and
              # remove this Sphinx extension when
              # https://launchpad.net/bugs/1260495 is fixed.
              'ext.apidoc',
              ]
# Render ``todo`` directives in the built output (sphinx.ext.todo).
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# if os.getenv('HUDSON_PUBLISH_DOCS'):
#     templates_path = ['_ga', '_templates']
# else:
#     templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'keystone'
copyright = u'2012, OpenStack, LLC'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['old']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystone.']
# -- Options for man page output --------------------------------------------
# Man pages to generate. Each tuple is
# (source file, target name, title, authors, manual section).
man_pages = [
    ('man/keystone-manage', 'keystone-manage', u'Keystone Management Utility',
     [u'OpenStack'], 1),
    ('man/keystone-all', 'keystone-all', u'Keystone Startup Command',
     [u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Use the date of the last git commit as the "last updated" stamp.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
# NOTE(review): os.popen silently yields an empty string when git is
# missing or this is not a git checkout, so the stamp just goes blank;
# consider subprocess.check_output if explicit failure handling is wanted.
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'keystonedoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'keystone.tex', u'Keystone Documentation',
u'OpenStack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'keystone', u'Keystone Documentation',
u'OpenStack', 'keystone', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "69d625d2647d9e059f0b810a8df945f0",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 32.70161290322581,
"alnum_prop": 0.6802712700369914,
"repo_name": "rodrigods/keystone",
"id": "ce4bbd480ed37a773392a61d15299d9203bc6d54",
"size": "9054",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2971055"
},
{
"name": "Shell",
"bytes": "10635"
}
],
"symlink_target": ""
} |
""" defines class _DbConnect_, for abstracting connections to databases
"""
from __future__ import print_function
from rdkit import RDConfig
import sys,types
class DbError(RuntimeError):
    # Package-specific error type; raised by DbConnect.InsertData on a failed
    # insert (see below).
    pass
from rdkit.Dbase import DbUtils,DbInfo,DbModule
class DbConnect(object):
    """ This class is intended to abstract away many of the details of
    interacting with databases.
    It includes some GUI functionality
    """

    def __init__(self, dbName='', tableName='', user='sysdba', password='masterkey'):
        """ Constructor
        **Arguments** (all optional)
          - dbName: the name of the DB file to be used
          - tableName: the name of the table to be used
          - user: the username for DB access
          - password: the password to be used for DB access
        """
        self.dbName = dbName
        self.tableName = tableName
        self.user = user
        self.password = password
        # connection and cursor are created lazily in GetCursor()
        self.cn = None
        self.cursor = None

    def UpdateTableNames(self, dlg):
        """ Modifies a connect dialog to reflect new table names
        **Arguments**
          - dlg: the dialog to be updated
        """
        # NOTE(review): the widgets below are read from ``self``, not from
        # ``dlg``; this only works if userEntry/passwdEntry/dbBrowseButton/
        # dbTableChoice have been attached to this instance elsewhere --
        # confirm with callers.
        self.user = self.userEntry.GetValue()
        self.password = self.passwdEntry.GetValue()
        self.dbName = self.dbBrowseButton.GetValue()
        # empty the table-choice widget, then repopulate it from the DB
        # (xrange is Python 2 only)
        for i in xrange(self.dbTableChoice.Number()):
            self.dbTableChoice.Delete(0)
        names = self.GetTableNames()
        for name in names:
            self.dbTableChoice.Append(name)
        dlg.sizer.Fit(dlg)
        dlg.sizer.SetSizeHints(dlg)
        dlg.Refresh()

    def GetTableNames(self, includeViews=0):
        """ gets a list of tables available in a database
        **Arguments**
          - includeViews: if this is non-null, the views in the db will
            also be returned
        **Returns**
            a list of table names
        **Notes**
          - this uses _DbInfo.GetTableNames_
        """
        return DbInfo.GetTableNames(self.dbName, self.user, self.password,
                                    includeViews=includeViews, cn=self.cn)

    def GetColumnNames(self, table='', join='', what='*', where='', **kwargs):
        """ gets a list of columns available in the current table
        **Returns**
            a list of column names
        **Notes**
          - this uses _DbInfo.GetColumnNames_
        """
        # fall back to the instance's default table when none is given
        if not table: table = self.tableName
        return DbInfo.GetColumnNames(self.dbName, table,
                                     self.user, self.password,
                                     join=join, what=what, cn=self.cn)

    def GetColumnNamesAndTypes(self, table='', join='', what='*', where='', **kwargs):
        """ gets a list of columns available in the current table along with their types
        **Returns**
            a list of 2-tuples containing:
              1) column name
              2) column type
        **Notes**
          - this uses _DbInfo.GetColumnNamesAndTypes_
        """
        if not table: table = self.tableName
        return DbInfo.GetColumnNamesAndTypes(self.dbName, table,
                                             self.user, self.password,
                                             join=join, what=what, cn=self.cn)

    def GetColumns(self, fields, table='', join='', **kwargs):
        """ gets a set of data from a table
        **Arguments**
          - fields: a string with the names of the fields to be extracted,
            this should be a comma delimited list
        **Returns**
            a list of the data
        **Notes**
          - this uses _DbUtils.GetColumns_
        """
        if not table: table = self.tableName
        return DbUtils.GetColumns(self.dbName, table, fields,
                                  self.user, self.password,
                                  join=join)

    def GetData(self, table=None, fields='*', where='', removeDups=-1, join='',
                transform=None, randomAccess=1, **kwargs):
        """ a more flexible method to get a set of data from a table
        **Arguments**
          - table: (optional) the table to use
          - fields: a string with the names of the fields to be extracted,
            this should be a comma delimited list
          - where: the SQL where clause to be used with the DB query
          - removeDups: indicates which column should be used to recognize
            duplicates in the data.  -1 for no duplicate removal.
        **Returns**
            a list of the data
        **Notes**
          - this uses _DbUtils.GetData_
        """
        if table is None:
            table = self.tableName
        # preserve any caller-supplied forceList, defaulting to 0
        kwargs['forceList'] = kwargs.get('forceList', 0)
        return DbUtils.GetData(self.dbName, table, fieldString=fields, whereString=where,
                               user=self.user, password=self.password, removeDups=removeDups,
                               join=join, cn=self.cn,
                               transform=transform, randomAccess=randomAccess, **kwargs)

    def GetDataCount(self, table=None, where='', join='', **kwargs):
        """ returns a count of the number of results a query will return
        **Arguments**
          - table: (optional) the table to use
          - where: the SQL where clause to be used with the DB query
          - join: the SQL join clause to be used with the DB query
        **Returns**
            an int
        **Notes**
          - this uses _DbUtils.GetData_
        """
        if table is None:
            table = self.tableName
        # count(*) query returns a single row/column: index [0][0] unwraps it
        return DbUtils.GetData(self.dbName, table, fieldString='count(*)',
                               whereString=where, cn=self.cn,
                               user=self.user, password=self.password, join=join, forceList=0)[0][0]

    def GetCursor(self):
        """ returns a cursor for direct manipulation of the DB
        only one cursor is available
        """
        # reuse the cached cursor; connect lazily on first use
        if self.cursor is not None:
            return self.cursor
        self.cn = DbModule.connect(self.dbName, self.user, self.password)
        self.cursor = self.cn.cursor()
        return self.cursor

    def KillCursor(self):
        """ closes the cursor
        """
        # NOTE(review): this only drops the references; neither the cursor nor
        # the connection is explicitly closed here.
        self.cursor = None
        self.cn = None

    def AddTable(self, tableName, colString):
        """ adds a table to the database
        **Arguments**
          - tableName: the name of the table to add
          - colString: a string containing column defintions
        **Notes**
          - if a table named _tableName_ already exists, it will be dropped
          - the sqlQuery for addition is:  "create table %(tableName) (%(colString))"
        """
        c = self.GetCursor()
        # best-effort drop of any pre-existing table: try the cascading form
        # first, then a plain drop; failures (e.g. no such table) are ignored
        try:
            c.execute('drop table %s cascade' % tableName)
        except:
            try:
                c.execute('drop table %s' % tableName)
            except:
                pass
        self.Commit()
        addStr = 'create table %s (%s)' % (tableName, colString)
        try:
            c.execute(addStr)
        except:
            import traceback
            print('command failed:', addStr)
            traceback.print_exc()
        else:
            self.Commit()

    def InsertData(self, tableName, vals):
        """ inserts data into a table
        **Arguments**
          - tableName: the name of the table to manipulate
          - vals: a sequence with the values to be inserted
        """
        c = self.GetCursor()
        # types.TupleType is the Python 2 spelling of ``tuple``
        if type(vals) != types.TupleType:
            vals = tuple(vals)
        # build a parameterized placeholder block, e.g. "(?,?,?)"
        insTxt = '(' + ','.join([DbModule.placeHolder] * len(vals)) + ')'
        #insTxt = '(%s'%('%s,'*len(vals))
        #insTxt = insTxt[0:-1]+')'
        cmd = "insert into %s values %s" % (tableName, insTxt)
        try:
            c.execute(cmd, vals)
        except:
            import traceback
            print('insert failed:')
            print(cmd)
            print('the error was:')
            traceback.print_exc()
            raise DbError("Insert Failed")

    def InsertColumnData(self, tableName, columnName, value, where):
        """ inserts data into a particular column of the table
        **Arguments**
          - tableName: the name of the table to manipulate
          - columnName: name of the column to update
          - value: the value to insert
          - where: a query yielding the row where the data should be inserted
        """
        c = self.GetCursor()
        # value is passed as a bind parameter; the other pieces are
        # interpolated directly into the SQL text
        cmd = "update %s set %s=%s where %s" % (tableName, columnName,
                                                DbModule.placeHolder, where)
        c.execute(cmd, (value,))

    def AddColumn(self, tableName, colName, colType):
        """ adds a column to a table
        **Arguments**
          - tableName: the name of the table to manipulate
          - colName: name of the column to insert
          - colType: the type of the column to add
        """
        c = self.GetCursor()
        try:
            c.execute("alter table %s add %s %s" % (tableName, colName, colType))
        except:
            print('AddColumn failed')

    def Commit(self):
        """ commits the current transaction
        """
        self.cn.commit()
| {
"content_hash": "93b65a9f1c807e32f7cb744e94bae52c",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 93,
"avg_line_length": 25.45671641791045,
"alnum_prop": 0.5935741088180112,
"repo_name": "AlexanderSavelyev/rdkit",
"id": "8906b15af69e213dd4ccf4a8ae3d3a91714c590d",
"size": "8827",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rdkit/Dbase/DbConnection.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "319851"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "6485466"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "Java",
"bytes": "244997"
},
{
"name": "JavaScript",
"bytes": "12260"
},
{
"name": "Makefile",
"bytes": "3243"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Perl",
"bytes": "2032"
},
{
"name": "Python",
"bytes": "2941698"
},
{
"name": "R",
"bytes": "474"
},
{
"name": "Shell",
"bytes": "8899"
}
],
"symlink_target": ""
} |
"""
tests.test_finalizers
~~~~~~~~~~~~~~~~~~~~~
"""
from yaspin import yaspin
def test_freeze(final_text):
    """_freeze renders the final text into a newline-terminated str frame."""
    spinner = yaspin()
    spinner._freeze(final_text)
    frame = spinner._last_frame
    assert isinstance(frame, str)
    assert frame[-1] == "\n"
def test_ok(final_text):
    """ok() produces a newline-terminated str frame."""
    spinner = yaspin()
    spinner.ok(final_text)
    frame = spinner._last_frame
    assert isinstance(frame, str)
    assert frame[-1] == "\n"
def test_ok_empty_case():
    """ok() without arguments falls back to the default "OK" text."""
    spinner = yaspin()
    spinner.ok()
    assert "OK" in spinner._last_frame
def test_fail(final_text):
    """fail() produces a newline-terminated str frame."""
    spinner = yaspin()
    spinner.fail(final_text)
    frame = spinner._last_frame
    assert isinstance(frame, str)
    assert frame[-1] == "\n"
def test_fail_empty_case():
    """fail() without arguments falls back to the default "FAIL" text."""
    spinner = yaspin()
    spinner.fail()
    assert "FAIL" in spinner._last_frame
| {
"content_hash": "0381190e90eb0afb2cb60e4e50520cf2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 42,
"avg_line_length": 16.976744186046513,
"alnum_prop": 0.5821917808219178,
"repo_name": "pavdmyt/yaspin",
"id": "ca6ca2834ee7892c777609d1d6489bc9aedd98a7",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_finalizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2520"
},
{
"name": "Python",
"bytes": "53614"
}
],
"symlink_target": ""
} |
import random
import subprocess
# Which server to use:
serverPath = "http://127.0.0.1:8888"
# Which of the servelets to test:
apiPath = "/signup"

randomRange = 20
mobSize = 100

# Open the results file in a context manager so it is closed once all the
# curl workers finish, and keep the Popen handles so every child process is
# reaped.  (The original leaked the file handle and never waited on curl, so
# output could be truncated and zombie processes accumulated.)
with open("resultsignuptest2.txt", "w+") as fd:
    workers = []
    for i in range(1, mobSize + 1):
        data = ("username=user" + str(random.randint(1, randomRange)) +
                "&email=user" + str(random.randint(1, randomRange)) +
                "@mail.com&password=asdfjkl;&repeatPassword=asdfjkl;")
        workers.append(subprocess.Popen(
            ["curl", "-s", "--data", data, serverPath + apiPath], stdout=fd))
    # wait for all requests to complete before the file is closed
    for worker in workers:
        worker.wait()
| {
"content_hash": "e8e4acaf5b420199bf639d3977a80ae6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 174,
"avg_line_length": 30.235294117647058,
"alnum_prop": 0.6867704280155642,
"repo_name": "witterk/thai-accounting",
"id": "6b0f1035308bd9850e6d6fde91f081cccb5f5471",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "load-testing/signuptest2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18978"
},
{
"name": "Java",
"bytes": "1096324"
},
{
"name": "JavaScript",
"bytes": "137045"
},
{
"name": "Perl",
"bytes": "1972"
},
{
"name": "Python",
"bytes": "1463"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for the ``marker`` property of
    # ``scatterpolargl`` traces.  The ``data_docs`` string below is passed
    # through to the plotly base validator at runtime and must not be edited
    # by hand.
    def __init__(self, plotly_name="marker", parent_name="scatterpolargl", **kwargs):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default  palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax`  Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.scatterpolargl.mar
                ker.ColorBar` instance or dict with compatible
                properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            line
                :class:`plotly.graph_objects.scatterpolargl.mar
                ker.Line` instance or dict with compatible
                properties
            opacity
                Sets the marker opacity.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for  opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
            size
                Sets the marker size (in px).
            sizemin
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the minimum size (in px)
                of the rendered marker points.
            sizemode
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the rule for which the
                data in `size` is converted to pixels.
            sizeref
                Has an effect only if `marker.size` is set to a
                numerical array. Sets the scale factor used to
                determine the rendered size of marker points.
                Use with `sizemin` and `sizemode`.
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for  size .
            symbol
                Sets the marker symbol type. Adding 100 is
                equivalent to appending "-open" to a symbol
                name. Adding 200 is equivalent to appending
                "-dot" to a symbol name. Adding 300 is
                equivalent to appending "-open-dot" or "dot-
                open" to a symbol name.
            symbolsrc
                Sets the source reference on Chart Studio Cloud
                for  symbol .
""",
            ),
            **kwargs
        )
| {
"content_hash": "b72e42987daffbd5958421b7a66f74d5",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 85,
"avg_line_length": 48.1,
"alnum_prop": 0.5418770418770419,
"repo_name": "plotly/python-api",
"id": "c04ae34bb415a783153d16e5ae39bf7fe133cf41",
"size": "6734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/_marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django.contrib.admin.models import LogEntry
from django.contrib.syndication.views import Feed
class AdminLogFeed(Feed):
    """Syndication feed of recent Django admin actions on the ``dnd`` app."""

    title = "DnDTools.eu changes"
    link = ''
    description = "What our great admins are working on right now."

    def items(self):
        """Return dicts describing the 50 most recent admin log entries."""
        log_list = LogEntry.objects.filter(
            content_type__app_label='dnd').order_by('-action_time').all()[:50]
        result = []
        for entry in log_list:
            # (removed a dead ``if False: entry = LogEntry()`` debug stub
            # that could never execute)
            change_message = entry.change_message
            url = None
            object_rep = entry.object_repr
            try:
                obj = entry.get_edited_object()
                object_rep = obj.__unicode__()
                url = obj.get_absolute_url()
            except Exception:
                # The edited object may have been deleted since, or may not
                # define get_absolute_url(); fall back to the stored repr
                # with no link.
                pass
            result.append({
                'object_rep': object_rep,
                'change_message': change_message,
                'url': url,
                'is_addition': entry.is_addition(),
                'is_change': entry.is_change(),
                'is_deletion': entry.is_deletion(),
                'author': entry.user,
                'pub_date': entry.action_time
            })
        return result

    def item_title(self, item):
        """Feed entry title: the action verb plus the object's repr."""
        if item['is_change']:
            return 'Changed %s' % item['object_rep']
        elif item['is_addition']:
            return 'Added %s' % item['object_rep']
        else:
            return 'Deleted %s' % item['object_rep']

    def item_description(self, item):
        """Feed entry body: change details plus the author's first name."""
        if item['is_change']:
            base = '%s -- %s' % (item['object_rep'], item['change_message'])
        elif item['is_addition']:
            base = 'Added %s' % item['object_rep']
        else:
            base = 'Deleted %s' % item['object_rep']
        return "%s<br/><br/>(%s)" % (base, item['author'].first_name)

    def item_author_name(self, item):
        """Author shown in the feed: the admin user's first name."""
        return item['author'].first_name

    def item_link(self, item):
        """Link to the edited object, or '' when it has no URL."""
        if item['url']:
            return item['url']
        return ''

    def item_pubdate(self, item):
        """Publication date: the log entry's action time."""
        return item['pub_date']
| {
"content_hash": "fdd06ef1c087db5c5e0ad35b1fbf1a79",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 30.098591549295776,
"alnum_prop": 0.5091249415067852,
"repo_name": "gregpechiro/dndtools",
"id": "9cbc958b6b93dddf4c4ed30c8a559045e94fcd38",
"size": "2161",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dndtools/dnd/feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91256"
},
{
"name": "HTML",
"bytes": "190011"
},
{
"name": "JavaScript",
"bytes": "80639"
},
{
"name": "Python",
"bytes": "2775119"
}
],
"symlink_target": ""
} |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE, EVENT_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
from hazelcast.protocol.builtin import CodecUtil
# Message-type ids for the Set.AddListener codec.
# hex: 0x060B00
_REQUEST_MESSAGE_TYPE = 396032
# hex: 0x060B01
_RESPONSE_MESSAGE_TYPE = 396033
# hex: 0x060B02
_EVENT_ITEM_MESSAGE_TYPE = 396034

# Byte offsets of the fixed-size fields inside each message's initial frame.
_REQUEST_INCLUDE_VALUE_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_LOCAL_ONLY_OFFSET = _REQUEST_INCLUDE_VALUE_OFFSET + BOOLEAN_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_LOCAL_ONLY_OFFSET + BOOLEAN_SIZE_IN_BYTES
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
_EVENT_ITEM_UUID_OFFSET = EVENT_HEADER_SIZE
_EVENT_ITEM_EVENT_TYPE_OFFSET = _EVENT_ITEM_UUID_OFFSET + UUID_SIZE_IN_BYTES
def encode_request(name, include_value, local_only):
    """Build the outbound Set.AddListener request message."""
    request_buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    # The two boolean flags live at fixed offsets inside the initial frame;
    # the set name follows as a variable-size string frame.
    FixSizedTypesCodec.encode_boolean(request_buf, _REQUEST_LOCAL_ONLY_OFFSET, local_only)
    FixSizedTypesCodec.encode_boolean(request_buf, _REQUEST_INCLUDE_VALUE_OFFSET, include_value)
    StringCodec.encode(request_buf, name, True)
    return OutboundMessage(request_buf, False)
def decode_response(msg):
    """Read the listener-registration UUID out of the response message."""
    first_frame = msg.next_frame()
    return FixSizedTypesCodec.decode_uuid(first_frame.buf, _RESPONSE_RESPONSE_OFFSET)
def handle(msg, handle_item_event=None):
    """Dispatch an incoming item-event message to ``handle_item_event``.

    Messages of any other type, or calls without a handler, are ignored.
    """
    if msg.get_message_type() != _EVENT_ITEM_MESSAGE_TYPE or handle_item_event is None:
        return
    initial_frame = msg.next_frame()
    member_uuid = FixSizedTypesCodec.decode_uuid(initial_frame.buf, _EVENT_ITEM_UUID_OFFSET)
    item_event_type = FixSizedTypesCodec.decode_int(initial_frame.buf, _EVENT_ITEM_EVENT_TYPE_OFFSET)
    item = CodecUtil.decode_nullable(msg, DataCodec.decode)
    handle_item_event(item, member_uuid, item_event_type)
| {
"content_hash": "ab191c60baa11cb3de231735b09740c9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 146,
"avg_line_length": 45.5,
"alnum_prop": 0.7652347652347652,
"repo_name": "hazelcast/hazelcast-python-client",
"id": "987558fdfab00f6ca88d12c599ae493d37b6683b",
"size": "2002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hazelcast/protocol/codec/set_add_listener_codec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2300326"
},
{
"name": "Shell",
"bytes": "1900"
}
],
"symlink_target": ""
} |
from pyAccess import utils as pAutils
def test1_strip():
    """strip() removes surrounding whitespace from strings."""
    for padded in (' test', 'test ', ' test '):
        assert pAutils.strip(padded) == 'test'
def test2_strip():
    """strip() returns a list input unchanged."""
    assert pAutils.strip(list('test')) == ['t', 'e', 's', 't']
| {
"content_hash": "6f47ed4458d1ed9829843d8026b9aee3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 24.77777777777778,
"alnum_prop": 0.6143497757847534,
"repo_name": "AA-ALERT/pyAccess",
"id": "29dbd3353692d0d9c685f561abbfd2eff1020f29",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34597"
}
],
"symlink_target": ""
} |
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
import cgi
from urllib import urlencode
from etree import etree
from .util import openURL
class ServiceException(Exception):
    """WMS ServiceException

    Attributes:
        message -- short error message
        xml -- full xml error message from server
    """

    def __init__(self, message, xml):
        self.xml = xml
        self.message = message

    def __str__(self):
        return repr(self.message)
class CapabilitiesError(Exception):
    # NOTE(review): not raised anywhere in this chunk; presumably signals an
    # invalid/unsupported capabilities document -- confirm against callers.
    pass
class WebMapService(object):
    """Abstraction for OGC Web Map Service (WMS).

    Implements IWebMapService.
    """

    def __getitem__(self, name):
        ''' check contents dictionary to allow dict like access to service layers'''
        if name in self.__getattribute__('contents').keys():
            return self.__getattribute__('contents')[name]
        else:
            # parenthesized raise: the original ``raise KeyError, msg`` is
            # Python-2-only syntax and a SyntaxError on Python 3
            raise KeyError("No content named %s" % name)

    def __init__(self, url, version='1.1.1', xml=None,
                 username=None, password=None
                 ):
        """Initialize from a service URL (or a stored capabilities XML)."""
        self.url = url
        self.username = username
        self.password = password
        self.version = version
        self._capabilities = None

        # Authentication handled by Reader
        reader = WMSCapabilitiesReader(
            self.version, url=self.url, un=self.username, pw=self.password
        )
        if xml:
            # read from stored xml
            self._capabilities = reader.readString(xml)
        else:
            # read from server
            self._capabilities = reader.read(self.url)

        # build metadata objects
        self._buildMetadata()

    def _getcapproperty(self):
        # lazily (re)fetch the capabilities document; cached after first call
        if not self._capabilities:
            reader = WMSCapabilitiesReader(
                self.version, url=self.url, un=self.username, pw=self.password
            )
            self._capabilities = ServiceMetadata(reader.read(self.url))
        return self._capabilities

    def _buildMetadata(self):
        ''' set up capabilities metadata objects '''
        # serviceIdentification metadata
        serviceelem = self._capabilities.find('Service')
        self.identification = ServiceIdentification(serviceelem, self.version)

        # serviceProvider metadata
        self.provider = ServiceProvider(serviceelem)

        # serviceOperations metadata
        self.operations = []
        for elem in self._capabilities.find('Capability/Request')[:]:
            self.operations.append(OperationMetadata(elem))

        # serviceContents metadata: our assumption is that services use a top-level
        # layer as a metadata organizer, nothing more.
        self.contents = {}
        caps = self._capabilities.find('Capability')
        for elem in caps.findall('Layer'):
            cm = ContentMetadata(elem)
            self.contents[cm.id] = cm
            for subelem in elem.findall('Layer'):
                subcm = ContentMetadata(subelem, cm)
                self.contents[subcm.id] = subcm

        # exceptions
        self.exceptions = [f.text for f
                           in self._capabilities.findall('Capability/Exception/Format')]

    def items(self):
        '''supports dict-like items() access'''
        items = []
        for item in self.contents:
            items.append((item, self.contents[item]))
        return items

    def getcapabilities(self):
        """Request and return capabilities document from the WMS as a
        file-like object.
        NOTE: this is effectively redundant now"""
        reader = WMSCapabilitiesReader(
            self.version, url=self.url, un=self.username, pw=self.password
        )
        u = self._open(reader.capabilities_url(self.url))
        # check for service exceptions, and return
        if u.info().gettype() == 'application/vnd.ogc.se_xml':
            se_xml = u.read()
            se_tree = etree.fromstring(se_xml)
            err_message = str(se_tree.find('ServiceException').text).strip()
            raise ServiceException(err_message, se_xml)
        return u

    def getmap(self, layers=None, styles=None, srs=None, bbox=None,
               format=None, size=None, time=None, transparent=False,
               bgcolor='#FFFFFF',
               exceptions='application/vnd.ogc.se_xml',
               method='Get'
               ):
        """Request and return an image from the WMS as a file-like object.

        Parameters
        ----------
        layers : list
            List of content layer names.
        styles : list
            Optional list of named styles, must be the same length as the
            layers list.
        srs : string
            A spatial reference system identifier.
        bbox : tuple
            (left, bottom, right, top) in srs units.
        format : string
            Output image format such as 'image/jpeg'.
        size : tuple
            (width, height) in pixels.
        transparent : bool
            Optional. Transparent background if True.
        bgcolor : string
            Optional. Image background color.
        method : string
            Optional. HTTP DCP method name: Get or Post.

        Example
        -------
            >>> img = wms.getmap(layers=['global_mosaic'],
            ...                  styles=['visual'],
            ...                  srs='EPSG:4326',
            ...                  bbox=(-112,36,-106,41),
            ...                  format='image/jpeg',
            ...                  size=(300,250),
            ...                  transparent=True,
            ...                  )
            >>> out = open('example.jpg', 'wb')
            >>> out.write(img.read())
            >>> out.close()
        """
        base_url = self.getOperationByName('GetMap').methods[method]['url']
        request = {'version': self.version, 'request': 'GetMap'}

        # check layers and styles
        assert len(layers) > 0
        request['layers'] = ','.join(layers)
        if styles:
            assert len(styles) == len(layers)
            request['styles'] = ','.join(styles)
        else:
            request['styles'] = ''

        # size
        request['width'] = str(size[0])
        request['height'] = str(size[1])

        request['srs'] = str(srs)
        request['bbox'] = ','.join([str(x) for x in bbox])
        request['format'] = str(format)
        request['transparent'] = str(transparent).upper()
        request['bgcolor'] = '0x' + bgcolor[1:7]
        request['exceptions'] = str(exceptions)

        if time is not None:
            request['time'] = str(time)

        data = urlencode(request)
        u = openURL(base_url, data, method, username=self.username, password=self.password)

        # check for service exceptions, and return
        if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
            se_xml = u.read()
            se_tree = etree.fromstring(se_xml)
            err_message = str(se_tree.find('ServiceException').text).strip()
            raise ServiceException(err_message, se_xml)
        return u

    def getServiceXML(self):
        # serialized capabilities document, or None if never loaded
        xml = None
        if self._capabilities:
            xml = etree.tostring(self._capabilities)
        return xml

    def getfeatureinfo(self):
        raise NotImplementedError

    def getOperationByName(self, name):
        """Return a named operation item."""
        for item in self.operations:
            if item.name == name:
                return item
        # parenthesized raise: valid on both Python 2 and Python 3
        raise KeyError("No operation named %s" % name)
class ServiceIdentification(object):
    ''' Implements IServiceIdentificationMetadata '''

    def __init__(self, infoset, version):
        self._root = infoset
        self.version = version
        self.type = self._root.find('Name').text
        self.title = self._root.find('Title').text
        abstract_node = self._root.find('Abstract')
        self.abstract = abstract_node.text if abstract_node is not None else None
        self.keywords = [kw.text for kw in self._root.findall('KeywordList/Keyword')]
        # AccessConstraints and Fees attributes are only set when the
        # corresponding elements are present in the document.
        constraints_node = self._root.find('AccessConstraints')
        if constraints_node is not None:
            self.accessconstraints = constraints_node.text
        fees_node = self._root.find('Fees')
        if fees_node is not None:
            self.fees = fees_node.text
class ServiceProvider(object):
    ''' Implements IServiceProviderMetatdata '''

    def __init__(self, infoset):
        """Parse provider metadata out of a WMS <Service> element."""
        self._root = infoset
        name = self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
        if name is not None:
            self.name = name.text
        else:
            self.name = None
        self.url = self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
        # contact metadata
        contact = self._root.find('ContactInformation')
        ## sometimes there is a contact block that is empty, so make
        ## sure there are children to parse
        if contact is not None and contact[:] != []:
            self.contact = ContactMetadata(contact)
        else:
            self.contact = None

    def getContentByName(self, name):
        """Return a named content item.

        NOTE(review): relies on a ``contents`` attribute that this class never
        sets -- confirm whether this method is actually used anywhere.
        """
        for item in self.contents:
            if item.name == name:
                return item
        # parenthesized raise: the original ``raise KeyError, msg`` form is
        # Python-2-only syntax and a SyntaxError on Python 3
        raise KeyError("No content named %s" % name)

    def getOperationByName(self, name):
        """Return a named operation item."""
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError("No operation named %s" % name)
class ContentMetadata:
    """
    Abstraction for WMS layer metadata.
    Implements IContentMetadata.

    Several properties (bounding boxes, SRS options, styles) are
    inherited from the parent layer when absent on this element.
    """
    def __init__(self, elem, parent=None):
        """Parse a capabilities <Layer> element.

        :param elem: etree element whose tag must be 'Layer'.
        :param parent: enclosing ContentMetadata layer, if any; used to
            inherit bounding boxes, SRS options and styles.
        :raises ValueError: if elem is not a Layer, or a Style lacks
            Name/Title.
        """
        self.parent = parent
        if elem.tag != 'Layer':
            raise ValueError('%s should be a Layer' % (elem,))
        # Name/Title become lowercase attributes; missing ones are None.
        for key in ('Name', 'Title'):
            val = elem.find(key)
            if val is not None:
                setattr(self, key.lower(), val.text.strip())
            else:
                setattr(self, key.lower(), None)
        self.id=self.name #conform to new interface
        # bboxes
        b = elem.find('BoundingBox')
        self.boundingBox = None
        if b is not None:
            try: #sometimes the SRS attribute is (wrongly) not provided
                srs=b.attrib['SRS']
            except KeyError:
                srs=None
            # 5-tuple: (minx, miny, maxx, maxy, srs-or-None)
            self.boundingBox = (
                float(b.attrib['minx']),
                float(b.attrib['miny']),
                float(b.attrib['maxx']),
                float(b.attrib['maxy']),
                srs,
            )
        elif self.parent:
            if hasattr(self.parent, 'boundingBox'):
                self.boundingBox = self.parent.boundingBox
        # NOTE(review): self.attribution is only defined when an
        # <Attribution> element exists -- callers must guard with hasattr.
        attribution = elem.find('Attribution')
        if attribution is not None:
            self.attribution = dict()
            title = attribution.find('Title')
            url = attribution.find('OnlineResource')
            logo = attribution.find('LogoURL')
            if title is not None:
                self.attribution['title'] = title.text
            if url is not None:
                self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
            if logo is not None:
                self.attribution['logo_size'] = (int(logo.attrib['width']), int(logo.attrib['height']))
                self.attribution['logo_url'] = logo.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
        # WGS84 bbox: 4-tuple without SRS, inherited from parent when absent.
        b = elem.find('LatLonBoundingBox')
        if b is not None:
            self.boundingBoxWGS84 = (
                float(b.attrib['minx']),
                float(b.attrib['miny']),
                float(b.attrib['maxx']),
                float(b.attrib['maxy']),
            )
        elif self.parent:
            self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
        else:
            self.boundingBoxWGS84 = None
        #SRS options
        self.crsOptions = []
        #Copy any parent SRS options (they are inheritable properties)
        if self.parent:
            self.crsOptions = list(self.parent.crsOptions)
        #Look for SRS option attached to this layer
        if elem.find('SRS') is not None:
            ## some servers found in the wild use a single SRS
            ## tag containing a whitespace separated list of SRIDs
            ## instead of several SRS tags. hence the inner loop
            for srslist in map(lambda x: x.text, elem.findall('SRS')):
                if srslist:
                    for srs in srslist.split():
                        self.crsOptions.append(srs)
        #Get rid of duplicate entries
        self.crsOptions = list(set(self.crsOptions))
        #Set self.crsOptions to None if the layer (and parents) had no SRS options
        if len(self.crsOptions) == 0:
            #raise ValueError('%s no SRS available!?' % (elem,))
            #Comment by D Lowe.
            #Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
            self.crsOptions=None
        #Styles
        self.styles = {}
        #Copy any parent styles (they are inheritable properties)
        if self.parent:
            self.styles = self.parent.styles.copy()
        #Get the styles for this layer (items with the same name are replaced)
        for s in elem.findall('Style'):
            name = s.find('Name')
            title = s.find('Title')
            if name is None or title is None:
                raise ValueError('%s missing name or title' % (s,))
            style = { 'title' : title.text }
            # legend url
            legend = s.find('LegendURL/OnlineResource')
            if legend is not None:
                style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
            self.styles[name.text] = style
        # keywords
        self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
        # timepositions - times for which data is available.
        # NOTE(review): extent.attrib.get("name") may be None for an
        # <Extent> without a name attribute, which would raise
        # AttributeError on .lower() -- confirm against real servers.
        self.timepositions=None
        for extent in elem.findall('Extent'):
            if extent.attrib.get("name").lower() =='time':
                self.timepositions=extent.text.split(',')
                break
        # Recursively parse nested layers, passing self as their parent.
        self.layers = []
        for child in elem.findall('Layer'):
            self.layers.append(ContentMetadata(child, self))
    def __str__(self):
        return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
    """Abstraction for WMS OperationMetadata.
    Implements IOperationMetadata.
    """
    def __init__(self, elem):
        """Populate name, format options and HTTP methods from *elem*.

        :param elem: capabilities element for one operation (its tag is
            the operation name, e.g. 'GetMap').
        """
        xlink_href = '{http://www.w3.org/1999/xlink}href'
        self.name = elem.tag
        # Every <Format> child advertises one supported output format.
        self.formatOptions = [fmt.text for fmt in elem.findall('Format')]
        # Map each HTTP verb element (Get/Post/...) to its endpoint URL.
        self.methods = dict(
            (verb.tag, {'url': verb.find('OnlineResource').attrib[xlink_href]})
            for verb in elem.findall('DCPType/HTTP/*')
        )
class ContactMetadata:
    """Abstraction for contact details advertised in GetCapabilities.
    """
    def __init__(self, elem):
        """Extract contact person, email, postal address, organization
        and position from a capabilities contact element.

        Every attribute defaults to None when the corresponding child
        element is absent.
        """
        def text_of(node, path):
            # Text of the first match of *path*, or None when absent.
            found = node.find(path)
            return found.text if found is not None else None

        self.name = text_of(elem, 'ContactPersonPrimary/ContactPerson')
        self.email = text_of(elem, 'ContactElectronicMailAddress')
        # Postal address pieces live under an optional <ContactAddress>.
        self.address = self.city = self.region = None
        self.postcode = self.country = None
        address = elem.find('ContactAddress')
        if address is not None:
            for attr, path in (('address', 'Address'),
                               ('city', 'City'),
                               ('region', 'StateOrProvince'),
                               ('postcode', 'PostCode'),
                               ('country', 'Country')):
                value = address.find(path)
                if value is not None:
                    setattr(self, attr, value.text)
        self.organization = text_of(elem, 'ContactPersonPrimary/ContactOrganization')
        self.position = text_of(elem, 'ContactPosition')
class WMSCapabilitiesReader:
    """Read and parse capabilities document into a lxml.etree infoset
    """
    def __init__(self, version='1.1.1', url=None, un=None, pw=None):
        """Remember protocol version, endpoint URL and credentials."""
        self.version = version
        self._infoset = None
        self.url = url
        self.username = un
        self.password = pw

    def capabilities_url(self, service_url):
        """Return a capabilities url

        Any query parameters already present in *service_url* are kept;
        service/request/version are appended only when missing.
        """
        parts = service_url.split('?')
        pairs = cgi.parse_qsl(parts[1]) if len(parts) > 1 else []
        present = set(key for key, _ in pairs)
        for key, value in (('service', 'WMS'),
                           ('request', 'GetCapabilities'),
                           ('version', self.version)):
            if key not in present:
                pairs.append((key, value))
        return parts[0] + '?' + urlencode(tuple(pairs))

    def read(self, service_url):
        """Get and parse a WMS capabilities document, returning an
        elementtree instance

        service_url is the base url, to which is appended the service,
        version, and request parameters
        """
        request = self.capabilities_url(service_url)
        # openURL wants the base url and the query string separately.
        base, query = request.split('?')
        response = openURL(base, query, method='Get',
                           username=self.username, password=self.password)
        return etree.fromstring(response.read())

    def readString(self, st):
        """Parse a WMS capabilities document, returning an elementtree instance

        string should be an XML capabilities document
        """
        if not isinstance(st, str):
            raise ValueError("String must be of type string, not %s" % type(st))
        return etree.fromstring(st)
| {
"content_hash": "1f57e899b000550892fd04092c304b97",
"timestamp": "",
"source": "github",
"line_count": 535,
"max_line_length": 156,
"avg_line_length": 36.25607476635514,
"alnum_prop": 0.5604990462442646,
"repo_name": "sabman/OWSLib",
"id": "1d4ee5bac15ee328fb7e762007842f2a8b603032",
"size": "19809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "owslib/wms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "251650"
}
],
"symlink_target": ""
} |
from . import validator, prefixlist, dao
from time import sleep
from expandas.loader import RIPERESTLoader
import threading
class UpdaterWorker(threading.Thread):
    """Background thread that periodically re-fetches AS sets and
    validates the resulting prefix lists against the stored ones."""

    def __init__(self, validator, rpsl_objects):
        super(UpdaterWorker, self).__init__()
        self.threadId = 1
        self.name = "pre-fixlist.UpdaterWorker"
        self.validator = validator
        self.rpsl_objects = rpsl_objects
        self.loader = RIPERESTLoader()
        # Seconds to sleep between full refresh passes.
        self.interval = 300
        print(self)

    def run(self):
        # Refresh forever; the thread lives for the process lifetime.
        while True:
            print(".")
            for rpsl_object in self.rpsl_objects:
                self._refresh(rpsl_object)
            sleep(self.interval)

    def _refresh(self, rpsl_object):
        # Fetch the AS set, build a fresh prefix list and validate it
        # against the previously stored one.
        print("Fetching {}".format(rpsl_object))
        fresh = prefixlist.PrefixList.from_asset(self.loader.load_asset(rpsl_object))
        previous = prefixlist.PrefixList(rpsl_object)
        self.validator.validate(fresh, previous)
        fresh.debug()
| {
"content_hash": "f25cced8b4cbb84e3c44fb80db888237",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 35.25925925925926,
"alnum_prop": 0.5987394957983193,
"repo_name": "emjemj/pre-fixlist",
"id": "d7c1786556f6bf10dcb042ce69b35bd8ffb58645",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prefixlist/updater.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLSQL",
"bytes": "3413"
},
{
"name": "Python",
"bytes": "15124"
}
],
"symlink_target": ""
} |
"""Update library's tags using MusicBrainz.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets import autotag, library, ui, util
from beets.autotag import hooks
from collections import defaultdict
def apply_item_changes(lib, item, move, pretend, write):
    """Store, move and write the item according to the arguments.

    When *pretend* is set, this is a no-op.
    """
    if pretend:
        return
    # Only move items that already live inside the library directory.
    if move and lib.directory in util.ancestry(item.path):
        item.move(with_album=False)
    if write:
        item.try_write()
    item.store()
class MBSyncPlugin(BeetsPlugin):
    """Plugin providing the ``mbsync`` command, which refreshes library
    metadata from MusicBrainz for items/albums carrying MusicBrainz IDs.
    """

    def __init__(self):
        super(MBSyncPlugin, self).__init__()

    def commands(self):
        """Declare the ``mbsync`` subcommand and its CLI options."""
        cmd = ui.Subcommand('mbsync',
                            help='update metadata from musicbrainz')
        cmd.parser.add_option('-p', '--pretend', action='store_true',
                              help='show all changes but do nothing')
        cmd.parser.add_option('-m', '--move', action='store_true',
                              dest='move',
                              help="move files in the library directory")
        cmd.parser.add_option('-M', '--nomove', action='store_false',
                              dest='move',
                              help="don't move files in library")
        cmd.parser.add_option('-W', '--nowrite', action='store_false',
                              default=None, dest='write',
                              help="don't write updated metadata to files")
        cmd.parser.add_format_option()
        cmd.func = self.func
        return [cmd]

    def func(self, lib, opts, args):
        """Command handler for the mbsync function.
        """
        move = ui.should_move(opts.move)
        pretend = opts.pretend
        write = ui.should_write(opts.write)
        query = ui.decargs(args)

        self.singletons(lib, query, move, pretend, write)
        self.albums(lib, query, move, pretend, write)

    def singletons(self, lib, query, move, pretend, write):
        """Retrieve and apply info from the autotagger for items matched by
        query.
        """
        for item in lib.items(query + ['singleton:true']):
            item_formatted = format(item)
            if not item.mb_trackid:
                self._log.info(u'Skipping singleton with no mb_trackid: {0}',
                               item_formatted)
                continue

            # Get the MusicBrainz recording info.
            track_info = hooks.track_for_mbid(item.mb_trackid)
            if not track_info:
                # FIX: second placeholder was '{0}', which repeated the
                # track ID and never displayed the formatted item.
                self._log.info(u'Recording ID not found: {0} for track {1}',
                               item.mb_trackid,
                               item_formatted)
                continue

            # Apply the fetched metadata inside a single transaction.
            with lib.transaction():
                autotag.apply_item_metadata(item, track_info)
                apply_item_changes(lib, item, move, pretend, write)

    def albums(self, lib, query, move, pretend, write):
        """Retrieve and apply info from the autotagger for albums matched by
        query and their items.
        """
        # Process matching albums.
        for a in lib.albums(query):
            album_formatted = format(a)
            if not a.mb_albumid:
                self._log.info(u'Skipping album with no mb_albumid: {0}',
                               album_formatted)
                continue

            items = list(a.items())

            # Get the MusicBrainz album information.
            album_info = hooks.album_for_mbid(a.mb_albumid)
            if not album_info:
                self._log.info(u'Release ID {0} not found for album {1}',
                               a.mb_albumid,
                               album_formatted)
                continue

            # Map recording MBIDs to their information. Recordings can appear
            # multiple times on a release, so each MBID maps to a list of
            # TrackInfo objects.
            track_index = defaultdict(list)
            for track_info in album_info.tracks:
                track_index[track_info.track_id].append(track_info)

            # Construct a track mapping according to MBIDs. This should work
            # for albums that have missing or extra tracks. If there are
            # multiple copies of a recording, they are disambiguated using
            # their disc and track number.
            mapping = {}
            for item in items:
                candidates = track_index[item.mb_trackid]
                if len(candidates) == 1:
                    mapping[item] = candidates[0]
                else:
                    for c in candidates:
                        if (c.medium_index == item.track and
                                c.medium == item.disc):
                            mapping[item] = c
                            break

            # Apply.
            self._log.debug('applying changes to {}', album_formatted)
            with lib.transaction():
                autotag.apply_metadata(album_info, mapping)
                changed = False
                for item in items:
                    item_changed = ui.show_model_changes(item)
                    changed |= item_changed
                    if item_changed:
                        apply_item_changes(lib, item, move, pretend, write)

                if not changed:
                    # No change to any item.
                    continue

                if not pretend:
                    # Update album structure to reflect an item in it.
                    for key in library.Album.item_keys:
                        a[key] = items[0][key]
                    a.store()

                    # Move album art (and any inconsistent items).
                    if move and lib.directory in util.ancestry(items[0].path):
                        self._log.debug(u'moving album {0}', album_formatted)
                        a.move()
| {
"content_hash": "0efe510dfec26e295ccfe0fb4e46911a",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 78,
"avg_line_length": 40.13907284768212,
"alnum_prop": 0.5164164329318595,
"repo_name": "LordSputnik/beets",
"id": "00eb966fada2c8a65297bdab20d496ca02d7fb0a",
"size": "6733",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "beetsplug/mbsync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1671957"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import shutil
import tempfile
import threading
import unittest
import datetime
from dashi import bootstrap
from eeagent import agent
import os
import time
import uuid
from eeagent.agent import EEAgentMain, MainRunnerThread
import simplejson as json
from eeagent.client import EEAgentClient
from eeagent.types import EEAgentLaunchType
from eeagent.util import timeout_poll, _set_param_or_default, validate_config
g_slot_count=3
g_timeout=5
def _get_cmd_args():
    """Assemble the fake argv used to configure a test EEAgent instance."""
    global g_slot_count
    global g_timeout

    def _short_id():
        # First chunk of a random UUID: short but unique enough for names.
        return str(uuid.uuid4()).split("-")[0]

    memory_name = _short_id()
    pdname = _short_id()
    eename = _short_id()
    tmp_dir = tempfile.mkdtemp(prefix="/tmp/supd")
    return [
        "fakeexe",
        "--server.memory.name=%s" % (memory_name),
        "--eeagent.launch_type.name=pyon_single",
        "--eeagent.launch_type.container_args=--noshell",
        "--eeagent.launch_type.supd_directory=%s" % (tmp_dir),
        "--eeagent.name=%s" % (pdname),
        "--eeagent.slots=%d" % (g_slot_count),
        "--pd.name=%s" % (eename),
        "--dashi.exchange=%s" % (eename),
        "--eeagent.heartbeat=%d" % (g_timeout)
    ]
class PyonRelEEAgentTests(unittest.TestCase):
    """Integration tests exercising a live EEAgent against pyon.

    All setup happens at class-definition (import) time: an EEAgentMain is
    constructed unconditionally, and tests are skipped unless the
    PYON_LOCATION environment variable is set.
    """
    # NOTE(review): pdname is assigned twice; the first value is unused.
    pdname = str(uuid.uuid4()).split("-")[0]
    eename = str(uuid.uuid4()).split("-")[0]
    pdname = str(uuid.uuid4()).split("-")[0]
    tmp_dir = tempfile.mkdtemp()
    memory_name = str(uuid.uuid4()).split("-")[0]
    exchange_name = str(uuid.uuid4()).split("-")[0]
    timeout=5
    slot_count=3
    pyon_location_string = 'PYON_LOCATION'
    # Skip the whole suite when pyon is not available.
    skip = pyon_location_string not in os.environ
    cmd_line_args = _get_cmd_args()
    if not skip:
        cmd_line_args.append('--eeagent.launch_type.pyon_directory=%s' % (os.environ[pyon_location_string]))
    # NOTE(review): the agent is created at import time even when skip is
    # True -- side effects happen regardless of the skip flag.
    eeagent = EEAgentMain(cmd_line_args)
    mainThread = MainRunnerThread(eeagent)
    # NOTE(review): unittest expects setUpClass/tearDownClass spelling;
    # 'setupClass'/'teardownClass' is recognized by nose -- confirm runner.
    @classmethod
    def setupClass(cls):
        print "setUpModule"
        if cls.skip:
            return
        try:
            cls.mainThread.start()
        except Exception, ex:
            pass
        # Give the agent thread time to come up before tests run.
        time.sleep(2)
    @classmethod
    def teardownClass(cls):
        if cls.skip:
            return
        try:
            print "tearDownModule"
            cls.mainThread.end()
            cls.mainThread.join()
            shutil.rmtree(cls.tmp_dir)
        except Exception, ex:
            pass
    def setUp(self):
        # Fresh heartbeat buffer and client per test.
        if PyonRelEEAgentTests.skip:
            return
        self.beats = []
        try:
            self.client = EEAgentClient(self.heartbeat, PyonRelEEAgentTests.eeagent.get_cfg())
        except Exception, ex:
            pass
    def tearDown(self):
        if PyonRelEEAgentTests.skip:
            return
        try:
            self._clean_all_jobs()
        except Exception, ex:
            pass
        del self.client
    def _test_for_pyon(self):
        # Raise SkipTest inside a test body when pyon is unavailable.
        if PyonRelEEAgentTests.skip:
            raise unittest.SkipTest()
    def heartbeat(self, msg):
        # Callback invoked by the client; accumulates decoded heartbeats.
        b = json.loads(msg)
        self.beats.append(b)
    def _clean_all_jobs(self):
        """Terminate/clean every process reported in heartbeats until the
        agent reports an empty process list."""
        self.beats = []
        self.client.dump()
        self.client.poll(count=1)
        b = self.beats[-1]
        while len(b['processes']) > 0:
            self.beats = []
            for pd in b['processes']:
                if pd['state'][1] == "RUNNING":
                    self.client.terminate(pd['upid'], pd['round'])
                else:
                    self.client.cleanup(pd['upid'], pd['round'])
            self.client.dump()
            self.client.poll(count=1)
            b = self.beats[-1]
        # get the next heartbeat before leaving
        self.client.poll(count=1)
    def test_run_simple(self):
        # Launch a single pyon app and verify it shows up in a heartbeat.
        self._test_for_pyon()
        yml_params = {"rel":{
            "apps" :
                {"type": 'application',
                 "name": "hello2",
                 "description": "Hello service (app version)",
                 "version": "0.1",
                 "mod": "examples.service.hello_service",
                 'dependencies': '[]',
                 "config": {"some": '"Hi"'},
                }
            }
        }
        (upid, round) = self.client.launch(yml_params, run_type=EEAgentLaunchType.pyon_single)
        self.client.dump()
        self.client.poll(count=1)
        proc = self._find_process_in_beat(upid)
        assert proc
    def _find_process_in_beat(self, upid, state=None):
        # Return the first process dict with matching upid (and state, when
        # given) from any received heartbeat, or None.
        for b in self.beats:
            for pd in b['processes']:
                if pd['upid'] == upid:
                    if state is None:
                        return pd
                    elif state == pd['state'][1]:
                        return pd
        return None
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6cb74b138415966b9c7a5e1cef3f2c79",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 108,
"avg_line_length": 28.854545454545455,
"alnum_prop": 0.5482041587901701,
"repo_name": "ooici/eeagent",
"id": "4722c8b151ac6f9c09c15d2d6a5175a7787da5ec",
"size": "4801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eeagent/tests/pyon_rel_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "57036"
}
],
"symlink_target": ""
} |
from enum import IntEnum
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "gawlowicz@tkn.tu-berlin.de"
class SerializationType(IntEnum):
    """Wire-format identifier for a message payload's serialization."""
    NONE = 0  # payload carried as-is, no (de)serialization applied
    JSON = 1
    PICKLE = 2
    MSGPACK = 3
    PROTOBUF = 4
class MessageDescription(object):
    """Envelope metadata describing a message's type, origin and encoding."""

    def __init__(self, msgType=None, sourceUuid=None,
                 serializationType=SerializationType.NONE):
        super().__init__()
        self.msgType = msgType
        self.sourceUuid = sourceUuid
        self.serializationType = serializationType

    def serialize(self):
        """Return a plain-dict representation suitable for transport."""
        return {
            "msgType": self.msgType,
            "sourceUuid": self.sourceUuid,
            "serializationType": self.serializationType,
        }

    @classmethod
    def parse(cls, buf):
        """Rebuild a MessageDescription from a dict made by serialize().

        Missing keys fall back to None (and serialization 0 == NONE).
        """
        serialization = SerializationType(buf.get("serializationType", 0))
        return cls(buf.get("msgType", None),
                   buf.get("sourceUuid", None),
                   serialization)
| {
"content_hash": "6a8830524f9e0227154500a977c5b87c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.6259615384615385,
"repo_name": "uniflex/uniflex",
"id": "faa641b08abd95556652e35b3f0dc49450903409",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uniflex/msgs/msgdescription.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94443"
}
],
"symlink_target": ""
} |
r"""Test code for PlanetPopulation Prototype module within EXOSIMS.
Cate Liu, IPAC, 2016"""
import unittest
from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation
import numpy as np
import scipy.stats
class TestPlanetPopulation(unittest.TestCase):
def setUp(self):
self.spec = {"modules": {"PlanetPhysicalModel": "PlanetPhysicalModel"}}
self.kscrit = 0.01
self.nsamp = 10000
def tearDown(self):
pass
def test_gen_angles(self):
"""
Test generation of orientation angles.
We expect long. and periapse to be uniformly distributed and
inclination to be sinusoidally distributed.
Edit made by Sonny Rappaport, Cornell, July 2021:
SciPY update has broken this method, so use KS test to check inclination
distribution and alter usage of chi^2 test for the uniform distributions
"""
pp = PlanetPopulation(**self.spec)
I, O, w = pp.gen_angles(self.nsamp)
# O & w are expected to be uniform
for j, (param, param_range) in enumerate(zip([O, w], [pp.Orange, pp.wrange])):
pval = scipy.stats.kstest(
param.value, scipy.stats.uniform.cdf, args=tuple(param_range.value)
).pvalue
if pval < self.kscrit:
_, param, param = pp.gen_angles(self.nsamp)
pval = scipy.stats.kstest(
param.value, scipy.stats.uniform.cdf, args=tuple(param_range.value)
).pvalue
self.assertGreater(
pval,
self.kscrit,
"{} does not appear uniform.".format(["Omega", "omega"][j]),
)
# cdf of the sin distribution for ks test
sin_cdf = lambda x: (-np.cos(x) / 2 + 0.5)
pval = scipy.stats.kstest(I, sin_cdf).pvalue
# allowed one do-over for noise
if pval <= self.kscrit:
I, _, _ = pp.gen_angles(self.nsamp)
pval = scipy.stats.kstest(I, sin_cdf).pvalue
self.assertGreater(pval, self.kscrit, "I does not appear sinusoidal")
def test_gen_plan_params(self):
"""
Test generation of planet orbital and phyiscal properties.
We expect eccentricity and albedo to be uniformly distributed
and sma and radius to be log-uniform
Edit made by Sonny Rappaport, Cornell, July 2021:
SciPY update has broken this method, so use KS test to check log-uniform
distribution and alter usage of chi^2 test for the uniform distributions
"""
pp = PlanetPopulation(**self.spec)
a, e, p, Rp = pp.gen_plan_params(self.nsamp)
# expect e and p to be uniform
for j, (param, param_range) in enumerate(zip([e, p], [pp.erange, pp.prange])):
pval = scipy.stats.kstest(
param,
scipy.stats.uniform.cdf,
args=(param_range[0], param_range[1] - param_range[0]),
).pvalue
if pval <= self.kscrit:
tmp = pp.gen_plan_params(self.nsamp)
pval = scipy.stats.kstest(
tmp[j + 1],
scipy.stats.uniform.cdf,
args=(param_range[0], param_range[1] - param_range[0]),
).pvalue
self.assertGreater(
pval,
self.kscrit,
"{} does not appear uniform.".format(["eccentricity", "albedo"][j]),
)
# expect a and Rp to be log-uniform
for j, (param, param_range) in enumerate(
zip([a.value, Rp.value], [pp.arange.value, pp.Rprange.value])
):
pval = scipy.stats.kstest(
param, scipy.stats.loguniform.cdf, args=tuple(param_range)
).pvalue
if pval < self.kscrit:
a2, _, _, R2 = pp.gen_plan_params(self.nsamp)
pval = scipy.stats.kstest(
[a2.value, R2.value][j],
scipy.stats.loguniform.cdf,
args=tuple(param_range),
).pvalue
self.assertGreater(
pval,
self.kscrit,
"{} does not appear log-uniform.".format(["sma", "planet radius"][j]),
)
def test_checkranges(self):
"""
Test that check ranges is doing what it should do
"""
pp = PlanetPopulation(arange=[10, 1], **self.spec)
self.assertTrue(pp.arange[0].value == 1)
self.assertTrue(pp.arange[1].value == 10)
with self.assertRaises(AssertionError):
pp = PlanetPopulation(prange=[-1, 1], **self.spec)
with self.assertRaises(AssertionError):
pp = PlanetPopulation(erange=[-1, 1], **self.spec)
with self.assertRaises(AssertionError):
pp = PlanetPopulation(arange=[0, 1], **self.spec)
with self.assertRaises(AssertionError):
pp = PlanetPopulation(Rprange=[0, 1], **self.spec)
with self.assertRaises(AssertionError):
pp = PlanetPopulation(Mprange=[0, 1], **self.spec)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "4498fdb1ec9f1b4512096fe4fa536e73",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 87,
"avg_line_length": 34.46,
"alnum_prop": 0.5596827239311278,
"repo_name": "dsavransky/EXOSIMS",
"id": "8e666b6a7cbbe5713e67fba74c3cc0f63dafffe4",
"size": "5169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/Prototypes/test_PlanetPopulation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
} |
"""Implementation of a thread-pool working with channels"""
from thread import (
WorkerThread,
StopProcessing,
)
from threading import Lock
from util import (
AsyncQueue,
DummyLock
)
from Queue import (
Queue,
Empty
)
from graph import Graph
from channel import (
mkchannel,
ChannelWriter,
Channel,
SerialChannel,
CallbackChannelReader
)
import sys
import weakref
from time import sleep
import new
__all__ = ('PoolReader', 'Pool', 'ThreadPool')
class PoolReader(CallbackChannelReader):
    """A reader designed to read from channels which take part in pools
    It acts like a handle to the underlying task in the pool.

    Reading triggers lazy evaluation: the pool schedules just enough work
    on the task graph to satisfy the request."""
    __slots__ = ('_task_ref', '_pool_ref')

    def __init__(self, channel, task, pool):
        """:param channel: channel the task writes its output into
        :param task: task instance producing our items
        :param pool: pool the task belongs to"""
        CallbackChannelReader.__init__(self, channel)
        # Weak references so a lingering reader does not keep the task
        # or pool alive by itself.
        self._task_ref = weakref.ref(task)
        self._pool_ref = weakref.ref(pool)

    def __del__(self):
        """Assures that our task will be deleted if we were the last reader"""
        task = self._task_ref()
        if task is None:
            return

        pool = self._pool_ref()
        if pool is None:
            return

        # if this is the last reader to the wc we just handled, there
        # is no way anyone will ever read from the task again. If so,
        # delete the task in question, it will take care of itself and orphans
        # it might leave
        # 1 is ourselves, + 1 for the call + 1, and 3 magical ones which
        # I can't explain, but appears to be normal in the destructor
        # On the caller side, getrefcount returns 2, as expected
        # When just calling remove_task,
        # it has no way of knowing that the write channel is about to diminish.
        # which is why we pass the info as a private kwarg - not nice, but
        # okay for now
        # NOTE(review): the magic refcount threshold (6) is CPython-specific
        # and sensitive to any restructuring of this method; do not refactor
        # without re-measuring the refcounts.
        if sys.getrefcount(self) < 6:
            pool.remove_task(task, _from_destructor_ = True)
        # END handle refcount based removal of task

    #{ Internal
    def _read(self, count=0, block=True, timeout=None):
        # Raw channel read that bypasses the pool scheduling done by read().
        return CallbackChannelReader.read(self, count, block, timeout)

    def pool_ref(self):
        """:return: reference to the pool we belong to"""
        return self._pool_ref

    def task_ref(self):
        """:return: reference to the task producing our items"""
        return self._task_ref

    #} END internal

    #{ Interface

    def task(self):
        """:return: task we read from
        :raise ValueError: If the instance is not attached to at task"""
        task = self._task_ref()
        if task is None:
            raise ValueError("PoolReader is not associated with at task anymore")
        return task

    def pool(self):
        """:return: pool our task belongs to
        :raise ValueError: if the instance does not belong to a pool"""
        pool = self._pool_ref()
        if pool is None:
            raise ValueError("PoolReader is not associated with a pool anymore")
        return pool

    #} END interface

    def read(self, count=0, block=True, timeout=None):
        """Read an item that was processed by one of our threads
        :note: Triggers task dependency handling needed to provide the necessary input"""
        # NOTE: we always queue the operation that would give us count items
        # as tracking the scheduled items or testing the channels size
        # is in herently unsafe depending on the design of the task network
        # If we put on tasks onto the queue for every request, we are sure
        # to always produce enough items, even if the task.min_count actually
        # provided enough - its better to have some possibly empty task runs
        # than having and empty queue that blocks.

        # if the user tries to use us to read from a done task, we will never
        # compute as all produced items are already in the channel
        task = self._task_ref()
        if task is None:
            return list()
        # END abort if task was deleted

        # A finished or errored task needs no further scheduling.
        skip_compute = task.is_done() or task.error()

        ########## prepare ##############################
        if not skip_compute:
            self._pool_ref()._prepare_channel_read(task, count)
        # END prepare pool scheduling

        ####### read data ########
        ##########################
        # read actual items, tasks were setup to put their output into our channel ( as well )
        items = CallbackChannelReader.read(self, count, block, timeout)
        ##########################

        return items
class Pool(object):
"""A thread pool maintains a set of one or more worker threads, but supports
a fully serial mode in which case the amount of threads is zero.
Work is distributed via Channels, which form a dependency graph. The evaluation
is lazy, as work will only be done once an output is requested.
The thread pools inherent issue is the global interpreter lock that it will hit,
which gets worse considering a few c extensions specifically lock their part
globally as well. The only way this will improve is if custom c extensions
are written which do some bulk work, but release the GIL once they have acquired
their resources.
Due to the nature of having multiple objects in git, its easy to distribute
that work cleanly among threads.
:note: the current implementation returns channels which are meant to be
used only from the main thread, hence you cannot consume their results
from multiple threads unless you use a task for it."""
__slots__ = ( '_tasks', # a graph of tasks
'_num_workers', # list of workers
'_queue', # master queue for tasks
'_taskorder_cache', # map task id -> ordered dependent tasks
'_taskgraph_lock', # lock for accessing the task graph
)
# CONFIGURATION
# The type of worker to create - its expected to provide the Thread interface,
# taking the taskqueue as only init argument
# as well as a method called stop_and_join() to terminate it
WorkerCls = None
# The type of lock to use to protect critical sections, providing the
# threading.Lock interface
LockCls = None
# the type of the task queue to use - it must provide the Queue interface
TaskQueueCls = None
def __init__(self, size=0):
self._tasks = Graph()
self._num_workers = 0
self._queue = self.TaskQueueCls()
self._taskgraph_lock = self.LockCls()
self._taskorder_cache = dict()
self.set_size(size)
def __del__(self):
self.set_size(0)
#{ Internal
def _prepare_channel_read(self, task, count):
"""Process the tasks which depend on the given one to be sure the input
channels are filled with data once we process the actual task
Tasks have two important states: either they are done, or they are done
and have an error, so they are likely not to have finished all their work.
Either way, we will put them onto a list of tasks to delete them, providng
information about the failed ones.
Tasks which are not done will be put onto the queue for processing, which
is fine as we walked them depth-first."""
# for the walk, we must make sure the ordering does not change. Even
# when accessing the cache, as it is related to graph changes
self._taskgraph_lock.acquire()
try:
try:
dfirst_tasks = self._taskorder_cache[id(task)]
except KeyError:
# have to retrieve the list from the graph
dfirst_tasks = self._tasks.input_inclusive_dfirst_reversed(task)
self._taskorder_cache[id(task)] = dfirst_tasks
# END handle cached order retrieval
finally:
self._taskgraph_lock.release()
# END handle locking
# check the min count on all involved tasks, and be sure that we don't
# have any task which produces less than the maximum min-count of all tasks
# The actual_count is used when chunking tasks up for the queue, whereas
# the count is usued to determine whether we still have enough output
# on the queue, checking qsize ( ->revise )
# ABTRACT: If T depends on T-1, and the client wants 1 item, T produces
# at least 10, T-1 goes with 1, then T will block after 1 item, which
# is read by the client. On the next read of 1 item, we would find T's
# queue empty and put in another 10, which could put another thread into
# blocking state. T-1 produces one more item, which is consumed right away
# by the two threads running T. Although this works in the end, it leaves
# many threads blocking and waiting for input, which is not desired.
# Setting the min-count to the max of the mincount of all tasks assures
# we have enough items for all.
# Addition: in serial mode, we would enter a deadlock if one task would
# ever wait for items !
actual_count = count
min_counts = (((t.min_count is not None and t.min_count) or count) for t in dfirst_tasks)
min_count = reduce(lambda m1, m2: max(m1, m2), min_counts)
if 0 < count < min_count:
actual_count = min_count
# END set actual count
# the list includes our tasks - the first one to evaluate first, the
# requested one last
for task in dfirst_tasks:
# if task.error() or task.is_done():
# in theory, the should never be consumed task in the pool, right ?
# They delete themselves once they are done. But as we run asynchronously,
# It can be that someone reads, while a task realizes its done, and
# we get here to prepare the read although it already is done.
# Its not a problem though, the task wiill not do anything.
# Hence we don't waste our time with checking for it
# raise AssertionError("Shouldn't have consumed tasks on the pool, they delete themeselves, what happend ?")
# END skip processing
# but use the actual count to produce the output, we may produce
# more than requested
numchunks = 1
chunksize = actual_count
remainder = 0
# we need the count set for this - can't chunk up unlimited items
# In serial mode we could do this by checking for empty input channels,
# but in dispatch mode its impossible ( == not easily possible )
# Only try it if we have enough demand
if task.max_chunksize and actual_count > task.max_chunksize:
numchunks = actual_count / task.max_chunksize
chunksize = task.max_chunksize
remainder = actual_count - (numchunks * chunksize)
# END handle chunking
# the following loops are kind of unrolled - code duplication
# should make things execute faster. Putting the if statements
# into the loop would be less code, but ... slower
if self._num_workers:
# respect the chunk size, and split the task up if we want
# to process too much. This can be defined per task
qput = self._queue.put
if numchunks > 1:
for i in xrange(numchunks):
qput((task.process, chunksize))
# END for each chunk to put
else:
qput((task.process, chunksize))
# END try efficient looping
if remainder:
qput((task.process, remainder))
# END handle chunksize
else:
# no workers, so we have to do the work ourselves
if numchunks > 1:
for i in xrange(numchunks):
task.process(chunksize)
# END for each chunk to put
else:
task.process(chunksize)
# END try efficient looping
if remainder:
task.process(remainder)
# END handle chunksize
# END handle serial mode
# END for each task to process
def _remove_task_if_orphaned(self, task, from_destructor):
"""Check the task, and delete it if it is orphaned"""
# 1 for writer on task, 1 for the getrefcount call + 1 for each other writer/reader
# If we are getting here from the destructor of an RPool channel,
# its totally valid to virtually decrement the refcount by 1 as
# we can expect it to drop once the destructor completes, which is when
# we finish all recursive calls
max_ref_count = 3 + from_destructor
if sys.getrefcount(task.writer().channel) < max_ref_count:
self.remove_task(task, from_destructor)
#} END internal
#{ Interface
def size(self):
""":return: amount of workers in the pool
:note: method is not threadsafe !"""
return self._num_workers
def set_size(self, size=0):
"""Set the amount of workers to use in this pool. When reducing the size,
threads will continue with their work until they are done before effectively
being removed.
:return: self
:param size: if 0, the pool will do all work itself in the calling thread,
otherwise the work will be distributed among the given amount of threads.
If the size is 0, newly added tasks will use channels which are NOT
threadsafe to optimize item throughput.
:note: currently NOT threadsafe !"""
assert size > -1, "Size cannot be negative"
# either start new threads, or kill existing ones.
# If we end up with no threads, we process the remaining chunks on the queue
# ourselves
cur_count = self._num_workers
if cur_count < size:
# we can safely increase the size, even from serial mode, as we would
# only be able to do this if the serial ( sync ) mode finished processing.
# Just adding more workers is not a problem at all.
add_count = size - cur_count
for i in range(add_count):
self.WorkerCls(self._queue).start()
# END for each new worker to create
self._num_workers += add_count
elif cur_count > size:
# We don't care which thread exactly gets hit by our stop request
# On their way, they will consume remaining tasks, but new ones
# could be added as we speak.
del_count = cur_count - size
for i in range(del_count):
self._queue.put((self.WorkerCls.stop, True)) # arg doesnt matter
# END for each thread to stop
self._num_workers -= del_count
# END handle count
if size == 0:
# NOTE: we do not preocess any tasks still on the queue, as we ill
# naturally do that once we read the next time, only on the tasks
# that are actually required. The queue will keep the tasks,
# and once we are deleted, they will vanish without additional
# time spend on them. If there shouldn't be any consumers anyway.
# If we should reenable some workers again, they will continue on the
# remaining tasks, probably with nothing to do.
# We can't clear the task queue if we have removed workers
# as they will receive the termination signal through it, and if
# we had added workers, we wouldn't be here ;).
pass
# END process queue
return self
def num_tasks(self):
""":return: amount of tasks"""
self._taskgraph_lock.acquire()
try:
return len(self._tasks.nodes)
finally:
self._taskgraph_lock.release()
    def remove_task(self, task, _from_destructor_ = False):
        """
        Delete the task.

        Additionally we will remove orphaned tasks, which can be identified if their
        output channel is only held by themselves, so no one will ever consume
        its items.

        This method blocks until all tasks to be removed have been processed, if
        they are currently being processed.

        :param task: the task node to delete from the pool's task graph
        :param _from_destructor_: set when invoked from an RPool channel
            destructor; forwarded to the orphan check so it can account for the
            reference that is about to disappear
        :return: self"""
        self._taskgraph_lock.acquire()
        try:
            # it can be that the task is already deleted, but its chunk was on the
            # queue until now, so its marked consumed again
            if not task in self._tasks.nodes:
                return self
            # END early abort

            # the task we are currently deleting could also be processed by
            # a thread right now. We don't care about it as its taking care about
            # its write channel itself, and sends everything it can to it.
            # For it it doesn't matter that its not part of our task graph anymore.

            # now delete our actual node - be sure its done to prevent further
            # processing in case there are still client reads on their way.
            task.set_done()

            # keep its input nodes as we check whether they were orphaned
            in_tasks = task.in_nodes
            self._tasks.remove_node(task)
            # cached task orderings are now stale
            self._taskorder_cache.clear()
        finally:
            self._taskgraph_lock.release()
        # END locked deletion

        # Orphan-check the former input tasks outside the lock: the check may
        # recurse back into remove_task, which re-acquires it.
        for t in in_tasks:
            self._remove_task_if_orphaned(t, _from_destructor_)
        # END handle orphans recursively
        return self
    def add_task(self, task):
        """Add a new task to be processed.

        :param task: the task to register; if it is pool-aware (exposes a
            'pool' slot) it may belong to at most one pool at a time
        :return: a read channel to retrieve processed items. If that handle is lost,
            the task will be considered orphaned and will be deleted on the next
            occasion.
        :raise ValueError: if the task is already registered with another pool"""
        # create a write channel for it
        ctype = Channel

        # adjust the task with our pool ref, if it has the slot and is empty
        # For now, we don't allow tasks to be used in multiple pools, except
        # for by their channels
        if hasattr(task, 'pool'):
            their_pool = task.pool()
            if their_pool is None:
                task.set_pool(self)
            elif their_pool is not self:
                raise ValueError("Task %r is already registered to another pool" % task.id)
            # END handle pool exclusivity
        # END handle pool aware tasks

        self._taskgraph_lock.acquire()
        try:
            # cached orderings become stale once the graph changes
            self._taskorder_cache.clear()
            self._tasks.add_node(task)

            # Use a non-threadsafe queue
            # This brings about 15% more performance, but sacrifices thread-safety
            if self.size() == 0:
                ctype = SerialChannel
            # END improve locks

            # setup the tasks channel - respect the task creators choice though
            # if it is set.
            wc = task.writer()
            ch = None
            if wc is None:
                ch = ctype()
                wc = ChannelWriter(ch)
                task.set_writer(wc)
            else:
                ch = wc.channel
            # END create write channel ifunset
            rc = PoolReader(ch, task, self)
        finally:
            self._taskgraph_lock.release()
        # END sync task addition

        # If the input channel is one of our read channels, we add the relation
        if hasattr(task, 'reader'):
            ic = task.reader()
            if hasattr(ic, 'pool_ref') and ic.pool_ref()() is self:
                self._taskgraph_lock.acquire()
                try:
                    self._tasks.add_edge(ic._task_ref(), task)

                    # additionally, bypass ourselves when reading from the
                    # task, if possible
                    if hasattr(ic, '_read'):
                        task.set_read(ic._read)
                    # END handle read bypass
                finally:
                    self._taskgraph_lock.release()
            # END handle edge-adding
        # END add task relation
        # END handle input channels for connections
        return rc
#} END interface
class ThreadPool(Pool):
    """A pool using threads as worker"""
    # Worker implementation spawned by set_size(): one OS thread per worker.
    WorkerCls = WorkerThread
    # Lock type guarding the task graph (presumably threading.Lock -- the
    # import is outside this view; confirm at the top of the file).
    LockCls = Lock
    # Threadsafe queue type carrying (method, item_count) work chunks.
    TaskQueueCls = AsyncQueue
| {
"content_hash": "fcf162262d49b4e1649f3891f87e7949",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 112,
"avg_line_length": 35.017716535433074,
"alnum_prop": 0.6929563213221653,
"repo_name": "Conjuro/async",
"id": "01bc30f7728cdf38cfdc4c13c62fd28dca562028",
"size": "17999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "31309"
},
{
"name": "Python",
"bytes": "100323"
}
],
"symlink_target": ""
} |
import sys, os, subprocess, json
import configuration, logtool
from ExifException import ExifException
log = logtool.getLogger("exiftool")
class ExifTool:
""" Wrapper arround Exiftool. Processes an *existing* media file and print information as json
Supports:
1) Reading tags from file
2) Editing + Writing new tags
3) Show supported filetypes/groups/tags
Generally methods that return json, do so directly. Text information is not returned but remains in self.stdout
NOTE: Each instance of the class has 1-1 relationship with a media file
"""
# Full path of media file
filename = None
# exiftool location (for execution)
exiftoolpath = None
# stdout Output of last execution
stdout = None
# stderr Output of last execution
stderr = None
def __init__(self, fname):
self.filename = fname
self.exiftoolpath = os.path.join(configuration.getExifDir(),"exiftool")
print self.exiftoolpath
def exiftool(self, args):
""" Execute exiftool with args
args: list of arguments as pass in command line
"""
log.debug("Executing exiftool " + `args`)
process = subprocess.Popen( [ self.exiftoolpath ] + args, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
( self.stdout , self.stderr ) = process.communicate()
if process.returncode != 0:
raise ExifException(self.stderr )
def probe(self, group="ALL", showgroups=False):
""" return properties as a python object. Numeric Values only!
Args:
group (optional): The tag group to fetch. Probes for everything by default
showgroups (optional): will return results as a dictionary of dictionaries for all the groups. (useful for separating tag groups)
"""
options = ["-n", "-j"]
if group == "XMP" or group == "EXIF" or group == "IPTC":
options.append("-%s:All" % group)
if group == "EXIF":
options.append("-Composite:All") #exiftool "hides" Exif tags under composite!
if showgroups:
options.append("-g")
options.append( self.filename )
self.exiftool(options)
return json.loads(self.stdout)[0]
def probegroups(self):
""" return all the metadata groups defined in the file as a list"""
md = probe()
return [ k for k in md ]
def listw(self, group=None):
""" List all writable TAGS. Optionally specify "group" e.g. "EXIF:ALL" """
if group == None:
self.exiftool(["-listw"])
else:
self.exiftool(["-listw",group])
def save(self, data):
""" data: dictionary of values to save """
args = []
for k in data:
args.append("-%s=%s" % ( k, str(data[k]) ))
args.append(self.filename)
self.exiftool(args)
def createXMP(self, outfile):
""" export metadata as an XMP sidecar file """
args = [ "-tagsfromfile", self.filename , outfile ] # using -o out.xmp complains if already exists
self.exiftool(args)
def add(self, data):
""" data: dictionary of values to save that didn't exist in original.
NOTE: The reason for having both save() and add() methods is that exiftool uses a different syntax
for adding new tags and changing existing tags
"""
args = []
for k in data:
args.append("-%s+=%s" % ( k, data[k]) )
args.append(self.filename)
self.exiftool(args)
################### MAIN #######################
if __name__ == "__main__" :
exif = ExifTool(sys.argv[1])
try:
exif.listw("-EXIF:ALL")
print "Stdout is: \n\n" + exif.stdout
#instance method
print exif.probe()
newdata = { "Software" : 'Bas fotware' }
exif.save(newdata)
print exif.probe()
exif.createXMP("out.xmp" );
except ExifException as e:
print e
| {
"content_hash": "7de04c0e2d1a3bceac320af53b2ae2fc",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 141,
"avg_line_length": 36.0625,
"alnum_prop": 0.5924733845011141,
"repo_name": "xmichael/tagger",
"id": "224743d469f5bcf691d42b717a1190e4e2eb1012",
"size": "4039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/lib/exiftool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "642711"
},
{
"name": "Python",
"bytes": "194953"
},
{
"name": "Ruby",
"bytes": "505"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
} |
import logging
from functools import partial
from netaddr import IPAddress
from exceptions import CommandExecError
from utils import retry_until_success, debug_failures
NET_NONE = "none"
logger = logging.getLogger(__name__)
class Workload(object):
    """
    A calico workload.

    These are the end-users containers that will run application-level
    software.
    """

    def __init__(self, host, name, image="busybox", network="bridge", ip=None):
        """
        Create the workload and detect its IPs.

        :param host: The host container on which this workload is instantiated.
        All commands executed by this container will be passed through the host
        via docker exec.
        :param name: The name given to the workload container. This name is
        passed to docker and can be used inside docker commands.
        :param image: The docker image to be used to instantiate this
        container. busybox used by default because it is extremely small and
        has ping.
        :param network: The DockerNetwork to connect to. Set to None to use
        default Docker networking.
        :param ip: The ip address to assign to the container.
        """
        self.host = host
        self.name = name
        ip_option = ("--ip %s" % ip) if ip else ""
        command = "docker run -tid --name %s --net %s %s %s" % \
                  (name, network, ip_option, image)
        docker_run_wl = partial(host.execute, command)
        retry_until_success(docker_run_wl)
        self.ip = host.execute(
            "docker inspect --format "
            "'{{.NetworkSettings.Networks.%s.IPAddress}}' %s"
            % (network, name))

    def execute(self, command):
        """
        Execute arbitrary commands on this workload.
        """
        # Make sure we've been created in the context of a host. Done here
        # instead of in __init__ as we can't exist in the host until we're
        # created.
        assert self in self.host.workloads
        return self.host.execute("docker exec %s %s" % (self.name, command))

    def _get_ping_function(self, ip):
        """
        Return a function to ping the supplied IP address from this workload.

        :param ip: The IPAddress to ping.
        :return: A partial function that can be executed to perform the ping.
        The function raises a CommandExecError exception if the ping fails,
        or returns the output of the ping.
        """
        # Default to "ping"; use "ping6" for IPv6 addresses. Parse errors fall
        # through to plain "ping" (matches historical behavior for hostnames).
        ping = "ping"
        try:
            version = IPAddress(ip).version
            assert version in [4, 6]
            if version == 6:
                ping = "ping6"
        except BaseException:
            pass

        args = [
            ping,
            "-c", "1",  # Number of pings
            "-W", "1",  # Timeout for each ping
            ip,
        ]
        command = ' '.join(args)
        return partial(self.execute, command)

    def _get_script_function(self, script, ip):
        """
        Build a connectivity probe that runs a check script inside the
        workload against the given ip.

        :param script: path of the check script inside the container.
        :param ip: The ip to check against.
        :return: A partial function that can be executed to perform the check.
        The function raises a CommandExecError exception if the check fails,
        or returns the output of the check.
        """
        command = ' '.join([script, ip])
        return partial(self.execute, command)

    def _get_tcp_function(self, ip):
        """Return a function to check tcp connectivity to another ip."""
        return self._get_script_function("/code/tcpping.sh", ip)

    def _get_tcp_asym_function(self, ip):
        """Return a function to check asymmetric tcp connectivity to
        another ip."""
        return self._get_script_function("/code/tcppingasym.sh", ip)

    def _get_udp_function(self, ip):
        """Return a function to check udp connectivity to another ip."""
        return self._get_script_function("/code/udpping.sh", ip)

    def _assert_reachable(self, check, retries):
        """
        Shared implementation of the check_can_* methods: run the probe with
        retries, returning True once it succeeds, False if it keeps failing.
        """
        try:
            retry_until_success(check, retries=retries,
                                ex_class=CommandExecError)
        except CommandExecError:
            return False
        return True

    def _assert_unreachable(self, check, retries):
        """
        Shared implementation of the check_cant_* methods: run the probe with
        retries, returning True once it fails, False if it keeps succeeding.
        """
        def expect_failure():
            # Invert the probe: reaching the destination is the error here.
            try:
                check()
            except CommandExecError:
                pass
            else:
                raise _PingError()

        try:
            retry_until_success(expect_failure, retries=retries,
                                ex_class=_PingError)
        except _PingError:
            return False
        return True

    @debug_failures
    def check_can_ping(self, ip, retries=0):
        """
        Execute a ping from this workload to the ip. Use retries to allow
        for convergence, assuming the network is transitioning from a state
        where the destination is currently unreachable.

        :param ip: The IP address (str or IPAddress) to ping.
        :param retries: The number of retries.
        :return: True if the ping eventually succeeds, else False.
        """
        return self._assert_reachable(self._get_ping_function(ip), retries)

    @debug_failures
    def check_cant_ping(self, ip, retries=0):
        """
        Execute a ping from this workload to the ip. Use retries to allow
        for convergence, assuming the network is transitioning from a state
        where the destination is currently reachable.

        :param ip: The IP address (str or IPAddress) to ping.
        :param retries: The number of retries.
        :return: True if the ping eventually fails, else False.
        """
        return self._assert_unreachable(self._get_ping_function(ip), retries)

    @debug_failures
    def check_can_tcp(self, ip, retries=0):
        """
        Check tcp connectivity from this workload to the ip. Use retries to
        allow for convergence from a state where the destination is
        currently unreachable.

        :param ip: The ip to check connectivity to.
        :param retries: The number of retries.
        :return: True if the connection eventually succeeds, else False.
        """
        return self._assert_reachable(self._get_tcp_function(ip), retries)

    @debug_failures
    def check_can_tcp_asym(self, ip, retries=0):
        """
        Check asymmetric tcp connectivity from this workload to the ip.
        Use retries to allow for convergence from a state where the
        destination is currently unreachable.

        :param ip: The ip to check connectivity to.
        :param retries: The number of retries.
        :return: True if the connection eventually succeeds, else False.
        """
        return self._assert_reachable(self._get_tcp_asym_function(ip), retries)

    @debug_failures
    def check_cant_tcp(self, ip, retries=0):
        """
        Check that this workload cannot connect to the ip via tcp. Use
        retries to allow for convergence from a state where the destination
        is currently reachable.

        :param ip: The ip to check connectivity to.
        :param retries: The number of retries.
        :return: True if the connection eventually fails, else False.
        """
        return self._assert_unreachable(self._get_tcp_function(ip), retries)

    @debug_failures
    def check_can_udp(self, ip, retries=0):
        """
        Check udp connectivity from this workload to the ip. Use retries to
        allow for convergence from a state where the destination is
        currently unreachable.

        :param ip: The ip to check connectivity to.
        :param retries: The number of retries.
        :return: True if the connection eventually succeeds, else False.
        """
        return self._assert_reachable(self._get_udp_function(ip), retries)

    @debug_failures
    def check_cant_udp(self, ip, retries=0):
        """
        Check that this workload cannot connect to the ip via udp. Use
        retries to allow for convergence from a state where the destination
        is currently reachable.

        :param ip: The ip to check connectivity to.
        :param retries: The number of retries.
        :return: True if the connection eventually fails, else False.
        """
        return self._assert_unreachable(self._get_udp_function(ip), retries)

    def __str__(self):
        return self.name
class _PingError(Exception):
pass
| {
"content_hash": "bb61d4d26bce700df61e073f5b696852",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 31.438953488372093,
"alnum_prop": 0.5662505779010634,
"repo_name": "projectcalico/libcalico",
"id": "ca139b2bdf66f9c7fb0ce6c53e0dde1db9f36832",
"size": "11421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calico_test/tests/st/utils/workload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2700"
},
{
"name": "Python",
"bytes": "546827"
}
],
"symlink_target": ""
} |
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError, Py4JNetworkError
import warnings
import ast
import traceback
import warnings
import signal
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# for back compatibility
class Logger(object):
    """File-like sink that forwards everything written to it to the
    JVM-side interpreter output (installed as sys.stdout below)."""

    def __init__(self):
        pass

    def write(self, message):
        # Push the text straight through to the interpreter process.
        intp.appendOutput(message)

    def reset(self):
        # Nothing is buffered locally, so there is nothing to discard.
        pass

    def flush(self):
        # Writes are forwarded immediately; flushing is a no-op.
        pass
class PyZeppelinContext(object):
    """ A context impl that uses Py4j to communicate to JVM
    """

    def __init__(self, z):
        self.z = z
        # Java-side classes used to build display-system widgets
        self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
        self.javaList = gateway.jvm.java.util.ArrayList
        # Maximum number of rows rendered by show_dataframe
        self.max_result = 1000
        self._displayhook = lambda *args: None
        self._setup_matplotlib()

    def getInterpreterContext(self):
        """Return the InterpreterContext of the currently running paragraph."""
        return self.z.getCurrentInterpreterContext()

    def input(self, name, defaultValue=""):
        """Create/read a paragraph-scoped input form."""
        return self.z.input(name, defaultValue)

    def textbox(self, name, defaultValue=""):
        """Create/read a paragraph-scoped textbox form."""
        return self.z.textbox(name, defaultValue)

    def noteTextbox(self, name, defaultValue=""):
        """Create/read a note-scoped textbox form."""
        return self.z.noteTextbox(name, defaultValue)

    def select(self, name, options, defaultValue=""):
        """Create/read a paragraph-scoped dropdown.

        ``options`` is a sequence of (value, label) tuples."""
        return self.z.select(name, defaultValue, self.getParamOptions(options))

    def noteSelect(self, name, options, defaultValue=""):
        """Create/read a note-scoped dropdown."""
        return self.z.noteSelect(name, defaultValue, self.getParamOptions(options))

    def checkbox(self, name, options, defaultChecked=[]):
        """Create/read a paragraph-scoped checkbox group.

        NOTE: the mutable default is safe here because the list is only
        iterated, never mutated."""
        return self.z.checkbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))

    def noteCheckbox(self, name, options, defaultChecked=[]):
        """Create/read a note-scoped checkbox group."""
        return self.z.noteCheckbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))

    def getParamOptions(self, options):
        """Convert (value, label) tuples into a Java ParamOption[] array.

        FIX: the original loop kept a manual counter and shadowed the
        builtin ``tuple`` with its loop variable; use enumerate and a
        non-shadowing name instead."""
        javaOptions = gateway.new_array(self.paramOption, len(options))
        for i, option in enumerate(options):
            javaOptions[i] = self.paramOption(option[0], option[1])
        return javaOptions

    def getDefaultChecked(self, defaultChecked):
        """Copy a python iterable of checked values into a java.util.ArrayList."""
        javaDefaultChecked = self.javaList()
        for check in defaultChecked:
            javaDefaultChecked.append(check)
        return javaDefaultChecked

    def show(self, p, **kwargs):
        """Display ``p``: the matplotlib.pyplot module, a pandas DataFrame,
        or any callable (which is simply invoked)."""
        if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
            self.show_matplotlib(p, **kwargs)
        elif type(p).__name__ == "DataFrame":  # does not play well with sub-classes
            # `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
            # and so a dependency on pandas
            self.show_dataframe(p, **kwargs)
        elif hasattr(p, '__call__'):
            p()  # error reporting

    def show_dataframe(self, df, show_index=False, **kwargs):
        """Pretty prints DF using Table Display System
        """
        # Truncate output to max_result rows.
        limit = len(df) > self.max_result
        header_buf = StringIO("")
        if show_index:
            idx_name = str(df.index.name) if df.index.name is not None else ""
            header_buf.write(idx_name + "\t")
        header_buf.write(str(df.columns[0]))
        for col in df.columns[1:]:
            header_buf.write("\t")
            header_buf.write(str(col))
        header_buf.write("\n")

        body_buf = StringIO("")
        rows = df.head(self.max_result).values if limit else df.values
        index = df.index.values
        for idx, row in zip(index, rows):
            if show_index:
                body_buf.write("%html <strong>{}</strong>".format(idx))
                body_buf.write("\t")
            body_buf.write(str(row[0]))
            for cell in row[1:]:
                body_buf.write("\t")
                body_buf.write(str(cell))
            body_buf.write("\n")
        body_buf.seek(0); header_buf.seek(0)
        #TODO(bzz): fix it, so it shows red notice, as in Spark
        print("%table " + header_buf.read() + body_buf.read()) # +
        #    ("\n<font color=red>Results are limited by {}.</font>" \
        #        .format(self.max_result) if limit else "")
        #)
        body_buf.close(); header_buf.close()

    def show_matplotlib(self, p, fmt="png", width="auto", height="auto",
                        **kwargs):
        """Matplotlib show function: render the current figure of pyplot
        module ``p`` as inline %html output (png data-URI or raw svg)."""
        if fmt == "png":
            img = BytesIO()
            p.savefig(img, format=fmt)
            img_str = b"data:image/png;base64,"
            img_str += base64.b64encode(img.getvalue().strip())
            img_tag = "<img src={img} style='width={width};height:{height}'>"
            # Decoding is necessary for Python 3 compability
            img_str = img_str.decode("ascii")
            img_str = img_tag.format(img=img_str, width=width, height=height)
        elif fmt == "svg":
            img = StringIO()
            p.savefig(img, format=fmt)
            img_str = img.getvalue()
        else:
            raise ValueError("fmt must be 'png' or 'svg'")

        html = "%html <div style='width:{width};height:{height}'>{img}<div>"
        print(html.format(width=width, height=height, img=img_str))
        img.close()

    def configure_mpl(self, **kwargs):
        """Forward matplotlib display configuration to the zinline backend."""
        import mpl_config
        mpl_config.configure(**kwargs)

    def _setup_matplotlib(self):
        # If we don't have matplotlib installed don't bother continuing
        try:
            import matplotlib
        except ImportError:
            return

        # Make sure custom backends are available in the PYTHONPATH
        rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
        mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
        if mpl_path not in sys.path:
            sys.path.append(mpl_path)

        # Finally check if backend exists, and if so configure as appropriate
        try:
            matplotlib.use('module://backend_zinline')
            import backend_zinline

            # Everything looks good so make config assuming that we are using
            # an inline backend
            self._displayhook = backend_zinline.displayhook
            self.configure_mpl(width=600, height=400, dpi=72,
                               fontsize=10, interactive=True, format='png')
        except ImportError:
            # Fall back to Agg if no custom backend installed
            matplotlib.use('Agg')
            warnings.warn("Unable to load inline matplotlib backend, "
                          "falling back to Agg")
def handler_stop_signals(sig, frame):
    """Terminate the interpreter process, reporting which signal hit us.

    :param sig: signal number received
    :param frame: current stack frame (unused)
    """
    sys.exit("Got signal : %s" % sig)
# Install a SIGINT handler so an interrupt tears the process down cleanly.
signal.signal(signal.SIGINT, handler_stop_signals)

# Gateway host defaults to localhost; an optional second CLI arg overrides it.
host = "127.0.0.1"
if len(sys.argv) >= 3:
    host = sys.argv[2]

# Namespace in which all user paragraph code is executed (shared across runs).
_zcUserQueryNameSpace = {}

# Connect back to the JVM-side Py4J gateway; the port comes from argv[1].
client = GatewayClient(address=host, port=int(sys.argv[1]))
#gateway = JavaGateway(client, auto_convert = True)
gateway = JavaGateway(client)
intp = gateway.entry_point
# Report our PID so the interpreter can manage this process.
intp.onPythonScriptInitialized(os.getpid())

java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
# The Zeppelin context is exposed to user code as both `z` and `__zeppelin__`.
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
_zcUserQueryNameSpace["z"] = z

# Redirect stdout into the interpreter so print output reaches the notebook.
output = Logger()
sys.stdout = output
#sys.stderr = output

# Main loop: fetch a statement block from the JVM, execute it in the shared
# namespace, and report success/failure back.
while True :
    req = intp.getStatements()
    if req == None:
        break

    try:
        stmts = req.statements().split("\n")
        final_code = []

        # Get post-execute hooks
        try:
            global_hook = intp.getHook('post_exec_dev')
        except:
            global_hook = None

        try:
            user_hook = __zeppelin__.getHook('post_exec')
        except:
            user_hook = None

        nhooks = 0
        for hook in (global_hook, user_hook):
            if hook:
                nhooks += 1

        for s in stmts:
            if s == None:
                continue

            # skip comment
            s_stripped = s.strip()
            if len(s_stripped) == 0 or s_stripped.startswith("#"):
                continue

            final_code.append(s)

        if final_code:
            # use exec mode to compile the statements except the last statement,
            # so that the last statement's evaluation will be printed to stdout
            code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
            to_run_hooks = []
            if (nhooks > 0):
                to_run_hooks = code.body[-nhooks:]
            # Everything before the final user statement runs in 'exec' mode;
            # the final user statement runs in 'single' mode so its value is
            # echoed; the hooks run last in 'exec' mode.
            to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
                                          [code.body[-(nhooks + 1)]])

            try:
                for node in to_run_exec:
                    mod = ast.Module([node])
                    code = compile(mod, '<stdin>', 'exec')
                    exec(code, _zcUserQueryNameSpace)

                for node in to_run_single:
                    mod = ast.Interactive([node])
                    code = compile(mod, '<stdin>', 'single')
                    exec(code, _zcUserQueryNameSpace)

                for node in to_run_hooks:
                    mod = ast.Module([node])
                    code = compile(mod, '<stdin>', 'exec')
                    exec(code, _zcUserQueryNameSpace)
            except:
                raise Exception(traceback.format_exc())

        intp.setStatementsFinished("", False)
    except Py4JJavaError:
        # Surface only the Java-side error text to the user, not our frames.
        excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
        innerErrorStart = excInnerError.find("Py4JJavaError:")
        if innerErrorStart > -1:
            excInnerError = excInnerError[innerErrorStart:]
        intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
    except Py4JNetworkError:
        # lost connection from gateway server. exit
        sys.exit(1)
    except:
        intp.setStatementsFinished(traceback.format_exc(), True)

    output.reset()
| {
"content_hash": "17e16313d0f30f9986d19911463c0fa5",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 107,
"avg_line_length": 30.989795918367346,
"alnum_prop": 0.645483481505872,
"repo_name": "anthonycorbacho/incubator-zeppelin",
"id": "519d1af4557cec2e2bd40a07b86c1b71b618c625",
"size": "9896",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python/src/main/resources/python/zeppelin_python.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11970"
},
{
"name": "CSS",
"bytes": "81177"
},
{
"name": "Groovy",
"bytes": "9274"
},
{
"name": "HTML",
"bytes": "290802"
},
{
"name": "Java",
"bytes": "3968010"
},
{
"name": "JavaScript",
"bytes": "512518"
},
{
"name": "Python",
"bytes": "74385"
},
{
"name": "R",
"bytes": "21301"
},
{
"name": "Roff",
"bytes": "60995"
},
{
"name": "Ruby",
"bytes": "3101"
},
{
"name": "Scala",
"bytes": "344382"
},
{
"name": "Shell",
"bytes": "77911"
},
{
"name": "Thrift",
"bytes": "5084"
},
{
"name": "XSLT",
"bytes": "1326"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.