text stringlengths 4 1.02M | meta dict |
|---|---|
# URL routing for the events API: exposes the EventHandler resource plus an
# auto-generated documentation view.
import handlers
from django.conf.urls.defaults import *
from treeio.core.api.auth import auth_engine
from treeio.core.api.doc import documentation_view
from treeio.core.api.resource import CsrfExemptResource
# Shared keyword arguments: every resource authenticates through auth_engine.
ad = { 'authentication': auth_engine }
#events resources
# CSRF is exempted because API clients authenticate per request, not via cookies.
eventResource = CsrfExemptResource(handler = handlers.EventHandler, **ad)
urlpatterns = patterns('',
#Events
# Interactive API documentation generated from the handlers module.
url(r'^doc$', documentation_view, kwargs={'module': handlers}, name="api_events_doc"),
# Collection endpoint (list/create).
url(r'^events$', eventResource, name="api_events"),
# Detail endpoint keyed by primary key.
# NOTE(review): this name duplicates the collection route's name above, so
# reverse("api_events") resolves to only one of the two -- confirm intended.
url(r'^event/(?P<object_ptr>\d+)', eventResource, name="api_events"),
)
| {
"content_hash": "40fa1180b777ab449fbbe4e9e2ff791e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 90,
"avg_line_length": 31.68421052631579,
"alnum_prop": 0.7358803986710963,
"repo_name": "rogeriofalcone/treeio",
"id": "c70131190d386bb470e376512237454dc57ad9b4",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import cPickle
import gzip
# Third-party libraries
import numpy as np
def load_data(pklsize):
    """Return a tuple ``(image_data, label_data)`` from the gzipped CVL pickle.

    Parameters
    ----------
    pklsize : int or str
        Size tag embedded in the pickle filename; e.g. ``32`` loads
        ``../data/cvl32.str.pkl.gz``.

    Returns
    -------
    tuple
        ``(image_data, label_data)`` as stored in the pickle; image_data
        contains tuples of ``(image, size(width, height))``.
    """
    # ``with`` guarantees the handle is closed even if unpickling fails
    # (the original leaked the file on error); str() replaces __str__().
    with gzip.open('../data/cvl' + str(pklsize) + '.str.pkl.gz', 'rb') as f:
        images_data, label_data = cPickle.load(f)
    return (images_data, label_data)
def vectorized_result(j, size=10):
    """Return a ``size``-dimensional unit column vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network.

    Parameters
    ----------
    j : int
        Index to set to 1.0.
    size : int, optional
        Length of the vector (default 10, for digit classification), so the
        helper also works for label sets of other sizes.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(size, 1)``.
    """
    e = np.zeros((size, 1))
    e[j] = 1.0
    return e
| {
"content_hash": "72992cb4abd4602ad8dd69fda51e9943",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 30.217391304347824,
"alnum_prop": 0.6503597122302158,
"repo_name": "avicorp/firstLook",
"id": "27ceb6ec120f5cd2eda0154db2c256014df27d57",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cvl_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88319"
}
],
"symlink_target": ""
} |
from timeit import Timer
# Implements a decorator for quick timing of functions.
# Note that this will not provide any return value.
def time_it(count):
    """Decorator factory: time the decorated function over ``count`` runs.

    The wrapper executes the function ``count`` times via ``timeit.Timer`` and
    prints the average duration (seconds when >= 1 s, milliseconds otherwise).
    Note that the wrapper does not provide any return value: ``Timer.timeit``
    only reports elapsed time, so the wrapper returns ``None``.
    """
    from functools import wraps

    def time_me_decorator(function):
        # wraps() preserves __name__/__doc__ so the printed label and any
        # introspection still refer to the decorated function, not the wrapper
        @wraps(function)
        def function_wrapper(*args, **kwargs):
            t = Timer(lambda: function(*args, **kwargs))
            time = t.timeit(count)
            delta = time / count
            if delta > 1:
                print('%r (%r, %r) | %sx | average %2.3f seconds per run'
                      % (function.__name__, args, kwargs, count, delta))
            else:
                print('%r (%r, %r) | %sx | average %2.3f ms per run'
                      % (function.__name__, args, kwargs, count, delta * 1000))
        return function_wrapper
    return time_me_decorator
| {
"content_hash": "022fbf076c9a1b46707fc0f31d1c19e3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.523809523809526,
"alnum_prop": 0.5436766623207301,
"repo_name": "bobbyluig/Eclipse",
"id": "c51caaac786017f00fa44737c505cf911c7db4c8",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shared/timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1401"
},
{
"name": "C",
"bytes": "18849"
},
{
"name": "C++",
"bytes": "1175923"
},
{
"name": "CMake",
"bytes": "58991"
},
{
"name": "CSS",
"bytes": "592222"
},
{
"name": "Forth",
"bytes": "329"
},
{
"name": "HTML",
"bytes": "48266"
},
{
"name": "JavaScript",
"bytes": "790348"
},
{
"name": "Jupyter Notebook",
"bytes": "84755"
},
{
"name": "Python",
"bytes": "412895"
},
{
"name": "Shell",
"bytes": "6860"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Simple volume rendering example: a 2x4 grid of the same volume, each copy
# cropped with a different cropping-region configuration.
reader = vtk.vtkSLCReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/sphere.slc")
# Create transfer functions for opacity and color
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(0, 0.0)
opacityTransferFunction.AddPoint(30, 0.0)
opacityTransferFunction.AddPoint(80, 0.5)
opacityTransferFunction.AddPoint(255, 0.5)
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(64.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(128.0, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(192.0, 0.0, 1.0, 0.0)
colorTransferFunction.AddRGBPoint(255.0, 0.0, 0.2, 0.0)
# Create properties, mappers, volume actors, and ray cast function
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(600, 300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(0.1, 0.2, 0.4)
# FIX: the original built names like "volumeMapper_0_1" via exec()/eval()
# string juggling; plain dictionaries keyed by (row, column) are safer and
# behave identically.
volumeMapper = {}
volume = {}
userMatrix = {}
for i in range(2):
    for j in range(4):
        mapper = vtk.vtkVolumeRayCastMapper()
        mapper.SetInputConnection(reader.GetOutputPort())
        mapper.SetVolumeRayCastFunction(compositeFunction)
        mapper.SetSampleDistance(0.4)
        mapper.CroppingOn()
        mapper.SetCroppingRegionPlanes(17, 33, 17, 33, 17, 33)
        vol = vtk.vtkVolume()
        vol.SetMapper(mapper)
        vol.SetProperty(volumeProperty)
        # center the volume, orient it per grid cell, then place it in the grid
        xform = vtk.vtkTransform()
        xform.PostMultiply()
        xform.Identity()
        xform.Translate(-25, -25, -25)
        if i == 0:
            xform.RotateX(j * 87 + 23)
            xform.RotateY(16)
        else:
            xform.RotateX(27)
            xform.RotateY(j * 87 + 19)
        xform.Translate(j * 55 + 25, i * 55 + 25, 0)
        vol.SetUserTransform(xform)
        ren1.AddViewProp(vol)
        volumeMapper[(i, j)] = mapper
        volume[(i, j)] = vol
        userMatrix[(i, j)] = xform
# One cropping style per grid cell.
volumeMapper[(0, 0)].SetCroppingRegionFlagsToSubVolume()
volumeMapper[(0, 1)].SetCroppingRegionFlagsToCross()
volumeMapper[(0, 2)].SetCroppingRegionFlagsToInvertedCross()
volumeMapper[(0, 3)].SetCroppingRegionFlags(24600)
volumeMapper[(1, 0)].SetCroppingRegionFlagsToFence()
volumeMapper[(1, 1)].SetCroppingRegionFlagsToInvertedFence()
volumeMapper[(1, 2)].SetCroppingRegionFlags(1)
volumeMapper[(1, 3)].SetCroppingRegionFlags(67117057)
ren1.GetCullers().InitTraversal()
culler = ren1.GetCullers().GetNextItem()
# render back-to-front so the cropped volumes composite correctly
culler.SetSortingStyleToBackToFront()
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(3.0)
renWin.Render()
def TkCheckAbort(object_binding, event_name):
    # Observer callback: abort a long render when UI events are pending.
    foo = renWin.GetEventPending()
    if (foo != 0):
        renWin.SetAbortRender(1)
renWin.AddObserver("AbortCheckEvent", TkCheckAbort)
iren.Initialize()
#iren.Start()
| {
"content_hash": "4138a6687a390966128d2f35c33037d8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 34.91509433962264,
"alnum_prop": 0.6881923804377196,
"repo_name": "hlzz/dotfiles",
"id": "6b00ab049340f60d50cc7572754775464c12613a",
"size": "3724",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/Rendering/Volume/Testing/Python/volRCCropRegions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
import os
# Allow cqlengine schema-management calls (sync_table, create_keyspace) from
# this example script; they are blocked by default.
os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = '1'
import logging
# Root logger prints timestamped INFO messages to stderr.
log = logging.getLogger()
log.setLevel('INFO')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
log.addHandler(handler)
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine import management
from cassandra.cqlengine import ValidationError
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.query import BatchQuery, LWTException
# Keyspace used (and dropped) by this example.
KEYSPACE = "testkeyspace"
class FamilyMembers(Model):
    """cqlengine model: one row per family member.

    Composite primary key: ``id`` is the partition key (shared by a whole
    family), ``surname`` and ``name`` are clustering keys.
    """
    __keyspace__ = KEYSPACE
    # Partition key; a fresh uuid4 is generated when not supplied.
    id = columns.UUID(primary_key=True, default=uuid4)
    surname = columns.Text(primary_key=True)  # clustering key
    name = columns.Text(primary_key=True)     # clustering key
    birth_year = columns.Integer()
    # Single character; column-level length limits enforce exactly one char.
    sex = columns.Text(min_length=1, max_length=1)

    def validate(self):
        """Model-level validation run on save/create, on top of column checks."""
        super(FamilyMembers, self).validate()
        if self.sex and self.sex not in 'mf':
            raise ValidationError("FamilyMember.sex must be one of ['m', 'f']")
        if self.birth_year and self.sex == 'f':
            raise ValidationError("FamilyMember.birth_year is set, and 'a lady never tells'")
def main():
    """End-to-end cqlengine demo: create a keyspace and table, exercise CRUD,
    LWT conditions, validation, batches and queries, then drop the keyspace."""
    # NOTE(review): assumes a default cqlengine connection is registered --
    # confirm connection.default() is the intended setup call here.
    connection.default()
    # Management functions would normally be used in development, and possibly for deployments.
    # They are typically not part of a core application.
    log.info("### creating keyspace...")
    management.create_keyspace_simple(KEYSPACE, 1)
    log.info("### syncing model...")
    management.sync_table(FamilyMembers)
    # default uuid is assigned
    simmons = FamilyMembers.create(surname='Simmons', name='Gene', birth_year=1949, sex='m')
    # add members to his family later
    FamilyMembers.create(id=simmons.id, surname='Simmons', name='Nick', birth_year=1989, sex='m')
    # NOTE(review): ``sophie`` is never used afterwards -- demonstration only.
    sophie = FamilyMembers.create(id=simmons.id, surname='Simmons', name='Sophie', sex='f')
    nick = FamilyMembers.objects(id=simmons.id, surname='Simmons', name='Nick')
    # lightweight transaction: update only if the precondition holds
    try:
        nick.iff(birth_year=1988).update(birth_year=1989)
    except LWTException:
        print("precondition not met")
    log.info("### setting individual column to NULL by updating it to None")
    nick.update(birth_year=None)
    # showing validation
    try:
        FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', birth_year=1957, sex='f')
    except ValidationError:
        log.exception('INTENTIONAL VALIDATION EXCEPTION; Failed creating instance:')
    FamilyMembers.create(id=simmons.id, surname='Tweed', name='Shannon', sex='f')
    log.info("### add multiple as part of a batch")
    # If creating many at one time, can use a batch to minimize round-trips
    hogan_id = uuid4()
    with BatchQuery() as b:
        FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Hulk', sex='m')
        FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Linda', sex='f')
        FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Nick', sex='m')
        FamilyMembers.batch(b).create(id=hogan_id, surname='Hogan', name='Brooke', sex='f')
    log.info("### All members")
    for m in FamilyMembers.all():
        print(m, m.birth_year, m.sex)
    log.info("### Select by partition key")
    for m in FamilyMembers.objects(id=simmons.id):
        print(m, m.birth_year, m.sex)
    log.info("### Constrain on clustering key")
    for m in FamilyMembers.objects(id=simmons.id, surname=simmons.surname):
        print(m, m.birth_year, m.sex)
    log.info("### Constrain on clustering key")
    # NOTE(review): ``kids`` is never used -- the query is shown for
    # demonstration only.
    kids = FamilyMembers.objects(id=simmons.id, surname=simmons.surname, name__in=['Nick', 'Sophie'])
    log.info("### Delete a record")
    FamilyMembers(id=hogan_id, surname='Hogan', name='Linda').delete()
    for m in FamilyMembers.objects(id=hogan_id):
        print(m, m.birth_year, m.sex)
    # clean up everything this demo created
    management.drop_keyspace(KEYSPACE)
# Allow running this example directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "e7a3cf64874c7771dd5458d2803f7739",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 102,
"avg_line_length": 37.56603773584906,
"alnum_prop": 0.6828227021597187,
"repo_name": "datastax/python-driver",
"id": "35100471c778a2cad309fc8204c93529a1e90f61",
"size": "4652",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example_mapper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28914"
},
{
"name": "Cython",
"bytes": "51225"
},
{
"name": "Groovy",
"bytes": "41012"
},
{
"name": "PowerShell",
"bytes": "5631"
},
{
"name": "Python",
"bytes": "3219458"
}
],
"symlink_target": ""
} |
import asyncio
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
import json
import random
class SlowSquareClientProtocol(WebSocketClientProtocol):
    """WebSocket client: sends one random number, prints the squared reply,
    then closes the connection and stops the event loop."""

    def onOpen(self):
        # Pick a random float in [0, 10) and ask the server to square it.
        x = 10. * random.random()
        self.sendMessage(json.dumps(x).encode('utf8'))
        print("Request to square {} sent.".format(x))

    def onMessage(self, payload, isBinary):
        # Only text frames carry the JSON result; binary frames are ignored.
        if not isBinary:
            res = json.loads(payload.decode('utf8'))
            print("Result received: {}".format(res))
            self.sendClose()

    def onClose(self, wasClean, code, reason):
        if reason:
            print(reason)
        # NOTE(review): relies on the module-level ``loop`` created under
        # __main__ -- confirm this protocol is only used from that script.
        loop.stop()
if __name__ == '__main__':
    # Connect to a local slow-square server and drive one request/response.
    factory = WebSocketClientFactory("ws://127.0.0.1:9000")
    factory.protocol = SlowSquareClientProtocol
    loop = asyncio.get_event_loop()
    coro = loop.create_connection(factory, '127.0.0.1', 9000)
    loop.run_until_complete(coro)
    # Blocks until the protocol's onClose() calls loop.stop().
    loop.run_forever()
    loop.close()
| {
"content_hash": "7883ff4a7487345b9e8b74167e956050",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 65,
"avg_line_length": 26.72972972972973,
"alnum_prop": 0.641051567239636,
"repo_name": "tavendo/AutobahnPython",
"id": "89d56791cd94e405abf2f2537afd43e917d169fb",
"size": "2283",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/asyncio/websocket/slowsquare/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3715"
},
{
"name": "Python",
"bytes": "983397"
}
],
"symlink_target": ""
} |
import unittest
import os
import sys
import subprocess
import re
class TestFlagsUseMkldnn(unittest.TestCase):
    """Verify FLAGS_tracer_mkldnn_ops_on / FLAGS_tracer_mkldnn_ops_off select
    which operators run through oneDNN, by launching a probe script with
    DNNL_VERBOSE=1 and grepping its output for per-op execution log lines."""

    def setUp(self):
        # Run the probe script with oneDNN verbose logging and mkldnn enabled.
        self._python_interp = sys.executable
        self._python_interp += " check_flags_mkldnn_ops_on_off.py"
        self.env = os.environ.copy()
        self.env[str("DNNL_VERBOSE")] = str("1")
        self.env[str("FLAGS_use_mkldnn")] = str("1")
        # oneDNN verbose-log patterns identifying each op by kind and shape.
        self.relu_regex = b"^onednn_verbose,exec,cpu,eltwise,.+alg:eltwise_relu alpha:0 beta:0,10x20x20"
        self.ew_add_regex = (
            b"^onednn_verbose,exec,cpu,binary.+alg:binary_add,10x20x30:10x20x30"
        )
        self.matmul_regex = (
            b"^onednn_verbose,exec,cpu,matmul,.*10x20x30:10x30x20:10x20x20"
        )

    def flags_use_mkl_dnn_common(self, e):
        """Run the probe script with extra env vars ``e``; return (stdout, stderr)."""
        cmd = self._python_interp
        env = dict(self.env, **e)
        proc = subprocess.Popen(
            cmd.split(" "),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
        )
        out, err = proc.communicate()
        returncode = proc.returncode
        assert returncode == 0
        return out, err

    def _print_when_false(self, cond, out, err):
        """Pass ``cond`` through, dumping the child's output when it is falsy."""
        if not cond:
            print('out', out)
            print('err', err)
        return cond

    def found(self, regex, out, err):
        """True iff ``regex`` matches somewhere in the child's stdout."""
        _found = re.search(regex, out, re.MULTILINE)
        return self._print_when_false(_found, out, err)

    def not_found(self, regex, out, err):
        """True iff ``regex`` does NOT match in the child's stdout."""
        _not_found = not re.search(regex, out, re.MULTILINE)
        return self._print_when_false(_not_found, out, err)

    def test_flags_use_mkl_dnn_on_empty_off_empty(self):
        # Neither list set: every op should go through oneDNN.
        out, err = self.flags_use_mkl_dnn_common({})
        assert self.found(self.relu_regex, out, err)
        assert self.found(self.ew_add_regex, out, err)
        assert self.found(self.matmul_regex, out, err)

    def test_flags_use_mkl_dnn_on(self):
        # Allow-list only relu: other ops must not use oneDNN.
        env = {str("FLAGS_tracer_mkldnn_ops_on"): str("relu")}
        out, err = self.flags_use_mkl_dnn_common(env)
        assert self.found(self.relu_regex, out, err)
        assert self.not_found(self.ew_add_regex, out, err)
        assert self.not_found(self.matmul_regex, out, err)

    def test_flags_use_mkl_dnn_on_multiple(self):
        # Allow-list is comma-separated.
        env = {str("FLAGS_tracer_mkldnn_ops_on"): str("relu,elementwise_add")}
        out, err = self.flags_use_mkl_dnn_common(env)
        assert self.found(self.relu_regex, out, err)
        assert self.found(self.ew_add_regex, out, err)
        assert self.not_found(self.matmul_regex, out, err)

    def test_flags_use_mkl_dnn_off(self):
        # Deny-list matmul only.
        env = {str("FLAGS_tracer_mkldnn_ops_off"): str("matmul")}
        out, err = self.flags_use_mkl_dnn_common(env)
        assert self.found(self.relu_regex, out, err)
        assert self.found(self.ew_add_regex, out, err)
        assert self.not_found(self.matmul_regex, out, err)

    def test_flags_use_mkl_dnn_off_multiple(self):
        env = {str("FLAGS_tracer_mkldnn_ops_off"): str("matmul,relu")}
        out, err = self.flags_use_mkl_dnn_common(env)
        assert self.not_found(self.relu_regex, out, err)
        assert self.found(self.ew_add_regex, out, err)
        assert self.not_found(self.matmul_regex, out, err)

    def test_flags_use_mkl_dnn_on_off(self):
        # Allow-list and deny-list combined: only elementwise_add remains.
        env = {
            str("FLAGS_tracer_mkldnn_ops_on"): str("elementwise_add"),
            str("FLAGS_tracer_mkldnn_ops_off"): str("matmul"),
        }
        out, err = self.flags_use_mkl_dnn_common(env)
        assert self.not_found(self.relu_regex, out, err)
        assert self.found(self.ew_add_regex, out, err)
        assert self.not_found(self.matmul_regex, out, err)
# Run the unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "668ae99b2814d29740d8f4860edf0fef",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 104,
"avg_line_length": 36.75247524752475,
"alnum_prop": 0.6042564655172413,
"repo_name": "luotao1/Paddle",
"id": "b59ce2ee71498f9595dc2c498e4325720a4f0765",
"size": "4323",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/mkldnn/test_flags_mkldnn_ops_on_off.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
import skrf
import pyvisa
from collections import OrderedDict
from collections.abc import Iterable
from . import abcvna
from . import keysight_pna_scpi
class PNA(abcvna.VNA):
    """
    Class for modern Keysight/Agilent Performance Network Analyzers
    """
    DEFAULT_VISA_ADDRESS = "GPIB::16::INSTR"  # fallback VISA resource string
    NAME = "Keysight PNA"
    NPORTS = 2      # number of test ports this driver assumes
    NCHANNELS = 32  # maximum number of measurement channels
    # firmware revision this driver's SCPI usage was verified against
    SCPI_VERSION_TESTED = 'A.09.20.08'
    def __init__(self, address=DEFAULT_VISA_ADDRESS, **kwargs):
        """
        initialization of PNA Class

        Parameters
        ----------
        address : str
            visa resource string (full string or ip address)
        kwargs : dict
            interface (str), port (int), timeout (int, milliseconds,
            default 2000) -- passed through to the abcvna.VNA base class
        """
        super(PNA, self).__init__(address, **kwargs)
        self.resource.timeout = kwargs.get("timeout", 2000)
        # SCPI command wrapper bound to the open VISA resource
        self.scpi = keysight_pna_scpi.SCPI(self.resource)
        # ASCII transfer is the safe default; use_binary() is faster for
        # large datasets but left disabled here.
        # self.use_binary()
        self.use_ascii()
def use_binary(self):
"""setup the analyzer to transfer in binary which is faster, especially for large datasets"""
self.resource.write(':FORM:BORD SWAP')
self.resource.write(':FORM:DATA REAL,64')
self.resource.values_format.use_binary(datatype='d', is_big_endian=False, container=np.array)
def use_ascii(self):
self.resource.write(':FORM:DATA ASCII')
self.resource.values_format.use_ascii(converter='f', separator=',', container=np.array)
@property
def echo(self):
return self.scpi.echo
@echo.setter
def echo(self, onoff):
if onoff in (1, True):
self.scpi.echo = True
elif onoff in (0, False):
self.scpi.echo = False
else:
raise warnings.warn("echo must be a boolean")
    @property
    def active_channel(self):
        """int: the analyzer's currently active channel (falls back to 1)."""
        old_timeout = self.resource.timeout
        # short timeout: the query times out when no channel is active
        self.resource.timeout = 500
        try:
            channel = self.scpi.query_active_channel()
        except pyvisa.VisaIOError:
            print("No channel active, using 1")
            channel = 1
        finally:
            # always restore the caller's timeout
            self.resource.timeout = old_timeout
        return channel

    @active_channel.setter
    def active_channel(self, channel):
        """
        Set the active channel on the analyzer

        Parameters
        ----------
        channel : int

        Notes
        -----
        There is no specific command to activate a channel, so we ask which channel we want and then activate the first
        trace on that channel. We do this because if the analyzer gets into a state where it doesn't recognize
        any activated measurements, the get_snp_network method will fail, and possibly others as well. That is why in
        some methods you will see the following line:
        self.active_channel = channel = kwargs.get("channel", self.active_channel)
        this way we force this property to be set, even if it just resets itself to the same value, but then a trace
        will become active and our get_snp_network method will succeed.
        """
        # TODO: Good chance this will fail if no measurement is on the set channel, need to think about that...
        mnum = self.scpi.query_meas_number_list(channel)[0]
        self.scpi.set_selected_meas_by_number(channel, mnum)
        return
def sweep(self, **kwargs):
"""
Initialize a fresh sweep of data on the specified channels
Parameters
----------
kwargs : dict
channel ("all", int or list of channels), timeout (milliseconds)
trigger a fresh sweep on the specified channels (default is "all" if no channel specified)
Autoset timeout and sweep mode based upon the analyzers current averaging setting,
and then return to the prior state of continuous trigger or hold.
"""
self.resource.clear()
self.scpi.set_trigger_source("IMM")
original_timeout = self.resource.timeout
# expecting either an int or a list of ints for the channel(s)
channels_to_sweep = kwargs.get("channels", None)
if not channels_to_sweep:
channels_to_sweep = kwargs.get("channel", "all")
if not type(channels_to_sweep) in (list, tuple):
channels_to_sweep = [channels_to_sweep]
channels = self.scpi.query_available_channels()
for i, channel in enumerate(channels):
sweep_mode = self.scpi.query_sweep_mode(channel)
was_continuous = "CONT" in sweep_mode.upper()
sweep_time = self.scpi.query_sweep_time(channel)
averaging_on = self.scpi.query_averaging_state(channel)
averaging_mode = self.scpi.query_averaging_mode(channel)
if averaging_on and "SWE" in averaging_mode.upper():
sweep_mode = "GROUPS"
number_of_sweeps = self.scpi.query_averaging_count(channel)
self.scpi.set_groups_count(channel, number_of_sweeps)
number_of_sweeps *= self.NPORTS
else:
sweep_mode = "SINGLE"
number_of_sweeps = self.NPORTS
channels[i] = {
"cnum": channel,
"sweep_time": sweep_time,
"number_of_sweeps": number_of_sweeps,
"sweep_mode": sweep_mode,
"was_continuous": was_continuous
}
self.scpi.set_sweep_mode(channel, "HOLD")
timeout = kwargs.get("timeout", None) # recommend not setting this variable, as autosetting is preferred
try:
for channel in channels:
import time
if "all" not in channels_to_sweep and channel["cnum"] not in channels_to_sweep:
continue # default for sweep is all, else if we specify, then sweep
if not timeout: # autoset timeout based on sweep time
sweep_time = channel["sweep_time"] * channel[
"number_of_sweeps"] * 1000 # convert to milliseconds, and double for buffer
self.resource.timeout = max(sweep_time * 2, 5000) # give ourselves a minimum 5 seconds for a sweep
else:
self.resource.timeout = timeout
self.scpi.set_sweep_mode(channel["cnum"], channel["sweep_mode"])
self.wait_until_finished()
finally:
self.resource.clear()
for channel in channels:
if channel["was_continuous"]:
self.scpi.set_sweep_mode(channel["cnum"], "CONT")
self.resource.timeout = original_timeout
return
def upload_twoport_calibration(self, cal, port1=1, port2=2, **kwargs):
"""
upload a calibration to the vna, and set correction on all measurements
Parameters
----------
cal : skrf.Calibration
port1: int
port2: int
calibration error terms reference
# forward = (1, 1), reverse = (2, 2)
"directivity": "EDIR",
"source match": "ESRM",
"reflection tracking": "ERFT",
# forward = (2, 1), reverse = (1, 2)
"load match": "ELDM",
"transmission tracking": "ETRT"
"isolation": "EXTLK"
"""
self.active_channel = channel = kwargs.get("channel", self.active_channel)
calname = kwargs.get("calname", "skrf_12term_cal")
calibrations = self.scpi.query_calset_catalog(cnum=channel, form="NAME")
if calname in calibrations:
self.scpi.set_delete_calset(cnum=channel, calset_name=calname)
self.scpi.set_create_calset(cnum=channel, calset_name=calname)
cfs = dict()
for coef, data in cal.coefs_12term.items():
cfs[coef] = skrf.mf.complex2Scalar(data)
for eterm, coef in zip(("EDIR", "ESRM", "ERFT"), ("directivity", "source match", "reflection tracking")):
self.scpi.set_calset_data(channel, eterm, port1, port1, eterm_data=cfs["forward " + coef])
self.scpi.set_calset_data(channel, eterm, port2, port2, eterm_data=cfs["reverse " + coef])
for eterm, coef in zip(("ELDM", "ETRT", "EXTLK"), ("load match", "transmission tracking", "isolation")):
self.scpi.set_calset_data(channel, eterm, port2, port1, eterm_data=cfs["forward " + coef])
self.scpi.set_calset_data(channel, eterm, port1, port2, eterm_data=cfs["reverse " + coef])
self.scpi.set_active_calset(1, calname, True)
def get_snp_network(self, ports, **kwargs):
"""
return n-port network as an Network object
Parameters
----------
ports : Iterable
a iterable of integers designating the ports to query
kwargs : dict
channel(int) [ default 'self.active_channel' ]
sweep(bool) [default True]
name(str) [default \"\"]
f_unit(str) [ default \"GHz\" ]
raw_data(bool) [default False]
Returns
-------
Network
general function to take in a list of ports and return the full snp network as a Network object
"""
self.resource.clear()
# force activate channel to avoid possible errors:
self.active_channel = channel = kwargs.get("channel", self.active_channel)
sweep = kwargs.get("sweep", True)
name = kwargs.get("name", "")
f_unit = kwargs.get("f_unit", "GHz")
raw_data = kwargs.get("raw_data", False)
ports = [int(port) for port in ports] if type(ports) in (list, tuple) else [int(ports)]
if not name:
name = "{:}Port Network".format(len(ports))
if sweep:
self.sweep(channel=channel)
npoints = self.scpi.query_sweep_n_points(channel)
snp_fmt = self.scpi.query_snp_format()
self.scpi.set_snp_format("RI")
if raw_data is True:
if self.scpi.query_channel_correction_state(channel):
self.scpi.set_channel_correction_state(channel, False)
data = self.scpi.query_snp_data(channel, ports)
self.scpi.set_channel_correction_state(channel, True)
else:
data = self.scpi.query_snp_data(channel, ports)
else:
data = self.scpi.query_snp_data(channel, ports)
self.scpi.set_snp_format(snp_fmt) # restore the value before we got the RI data
self.scpi.set_snp_format(snp_fmt) # restore the value before we got the RI data
nrows = int(len(data) / npoints)
nports = int(np.sqrt((nrows - 1)/2))
data = data.reshape([nrows, -1])
fdata = data[0]
sdata = data[1:]
ntwk = skrf.Network()
ntwk.frequency = skrf.Frequency.from_f(fdata, unit="Hz")
ntwk.s = np.empty(shape=(sdata.shape[1], nports, nports), dtype=complex)
for n in range(nports):
for m in range(nports):
i = n * nports + m
ntwk.s[:, m, n] = sdata[i * 2] + 1j * sdata[i * 2 + 1]
ntwk.frequency.unit = f_unit
ntwk.name = name
return ntwk
def get_list_of_traces(self, **kwargs):
self.resource.clear()
traces = []
channels = self.scpi.query_available_channels()
for channel in channels:
meas_list = self.scpi.query_meas_name_list(channel)
if len(meas_list) == 1:
continue # if there isnt a single comma, then there aren't any measurements
parameters = dict([(meas_list[k], meas_list[k + 1]) for k in range(0, len(meas_list) - 1, 2)])
meas_numbers = self.scpi.query_meas_number_list()
for mnum in meas_numbers:
name = self.scpi.query_meas_name_from_number(mnum)
item = {"name": name, "channel": channel, "measurement number": mnum,
"parameter": parameters.get(name, name)}
item["label"] = "{:s} - Chan{:},Meas{:}".format(
item["parameter"], item["channel"], item["measurement number"])
traces.append(item)
return traces
    def get_traces(self, traces, **kwargs):
        """
        retrieve traces as 1-port networks from a list returned by get_list_of_traces

        Parameters
        ----------
        traces : list
            list of type that is exported by self.get_list_of_traces
        kwargs : dict
            sweep (bool), name_prefix (str)

        Returns
        -------
        list
            a list of 1-port networks representing the desired traces

        Notes
        -----
        There is no current way to distinguish between traces and 1-port networks within skrf
        """
        self.resource.clear()
        sweep = kwargs.get("sweep", False)
        name_prefix = kwargs.get("name_prefix", "")
        if name_prefix:
            name_prefix += " - "
        # group the requested traces by channel so each channel is swept once
        channels = OrderedDict()
        for trace in traces:
            ch = trace["channel"]
            if ch not in channels.keys():
                channels[ch] = {
                    "frequency": None,
                    "traces": list()}
            channels[ch]["traces"].append(trace)
        if sweep is True:
            self.sweep(channels=list(channels.keys()))
        traces = []
        for ch, ch_data in channels.items():
            frequency = ch_data["frequency"] = self.get_frequency()
            for trace in ch_data["traces"]:
                self.scpi.set_selected_meas_by_number(trace["channel"], trace["measurement number"])
                sdata = self.scpi.query_data(trace["channel"], "SDATA")
                # interleaved real/imag pairs -> complex s-parameters
                s = sdata[::2] + 1j * sdata[1::2]
                ntwk = skrf.Network()
                ntwk.s = s
                ntwk.frequency = frequency
                ntwk.name = name_prefix + trace.get("parameter", "trace")
                traces.append(ntwk)
        return traces
    def get_frequency(self, **kwargs):
        """
        get an skrf.Frequency object for the current channel

        Parameters
        ----------
        kwargs : dict
            channel (int), f_unit (str)

        Returns
        -------
        skrf.Frequency
        """
        self.resource.clear()
        channel = kwargs.get("channel", self.active_channel)
        sweep_type = self.scpi.query_sweep_type(channel)
        if sweep_type in ["LIN", "LOG", "SEGM"]:
            freqs = self.scpi.query_sweep_data(channel)
        else:
            # non-swept modes (e.g. CW) have a single stimulus frequency
            freqs = np.array([self.scpi.query_f_start(channel)])
        frequency = skrf.Frequency.from_f(freqs, unit="Hz")
        frequency.unit = kwargs.get("f_unit", "Hz")
        return frequency
def set_frequency_sweep(self, f_start, f_stop, f_npoints, **kwargs):
f_unit = kwargs.get("f_unit", "hz").lower()
if f_unit != "hz":
f_start = self.to_hz(f_start, f_unit)
f_stop = self.to_hz(f_stop, f_unit)
channel = kwargs.get("channel", self.active_channel)
self.scpi.set_f_start(channel, f_start)
self.scpi.set_f_stop(channel, f_stop)
self.scpi.set_sweep_n_points(f_npoints)
    def get_switch_terms(self, ports=(1, 2), **kwargs):
        """
        Measure forward and reverse switch terms between two ports.

        Creates two temporary a/b wave-ratio measurements, sweeps them, and
        returns them as 1-port networks; the temporary measurements are
        deleted afterwards.

        Parameters
        ----------
        ports : tuple of int
            the two ports to measure switch terms between
        kwargs : dict
            channel (int)

        Returns
        -------
        tuple of skrf.Network
            (forward switch terms, reverse switch terms)
        """
        self.resource.clear()
        p1, p2 = ports
        self.active_channel = channel = kwargs.get("channel", self.active_channel)
        measurements = self.get_meas_list()
        max_trace = len(measurements)
        # find the highest existing trace number so the temp names don't collide
        for meas in measurements: # type: str
            try:
                trace_num = int(meas[0][-2:].replace("_", ""))
                if trace_num > max_trace:
                    max_trace = trace_num
            except ValueError:
                pass
        forward_name = "CH{:}_FS_P{:d}_{:d}".format(channel, p1, max_trace + 1)
        reverse_name = "CH{:}_RS_P{:d}_{:d}".format(channel, p2, max_trace + 2)
        # forward: a2/b2 with source on port 1; reverse: a1/b1 with source on port 2
        self.create_meas(forward_name, 'a{:}b{:},{:}'.format(p2, p2, p1))
        self.create_meas(reverse_name, 'a{:}b{:},{:}'.format(p1, p1, p2))
        self.sweep(channel=channel)
        forward = self.get_measurement(mname=forward_name, sweep=False) # type: skrf.Network
        forward.name = "forward switch terms"
        reverse = self.get_measurement(mname=reverse_name, sweep=False) # type: skrf.Network
        reverse.name = "reverse switch terms"
        # clean up the temporary measurements
        self.scpi.set_delete_meas(channel, forward_name)
        self.scpi.set_delete_meas(channel, reverse_name)
        return forward, reverse
def get_measurement(self, mname=None, mnum=None, **kwargs):
    """Retrieve a measurement trace, addressed either by name or by number.

    Parameters
    ----------
    mname : str
        the name of the measurement, e.g. 'CH1_S11_1'
    mnum : int
        the number of the measurement
    kwargs : dict
        channel (int), sweep (bool)

    Returns
    -------
    skrf.Network

    Raises
    ------
    ValueError
        if neither ``mname`` nor ``mnum`` is given
    """
    if mname is None and mnum is None:
        raise ValueError("must provide either a measurement mname or a mnum")
    ch = kwargs.get("channel", self.active_channel)
    # a string selects by name; anything else falls back to the number
    if type(mname) is str:
        self.scpi.set_selected_meas(ch, mname)
    else:
        self.scpi.set_selected_meas_by_number(ch, mnum)
    return self.get_active_trace_as_network(**kwargs)
def get_active_trace_as_network(self, **kwargs):
    """Read the currently selected trace into an skrf.Network.

    Parameters
    ----------
    kwargs : dict
        sweep (bool) -- trigger a fresh sweep before reading the data

    Returns
    -------
    skrf.Network
    """
    ch = self.active_channel
    if kwargs.get("sweep", False):
        self.sweep(channel=ch)

    network = skrf.Network()
    raw = self.scpi.query_data(ch)
    # the instrument returns interleaved real/imag pairs; fold them into
    # a complex s-parameter vector
    network.s = raw[::2] + 1j * raw[1::2]
    network.frequency = self.get_frequency(channel=ch)
    return network
def create_meas(self, mname, param, **kwargs):
    """Define a new measurement on the analyzer and put it on screen.

    Parameters
    ----------
    mname : str
        name of the measurement **WARNING**, not all names behave well
    param : str
        analyzer parameter, e.g.: S11 ; a1/b1,1 ; A/R1,1
    kwargs : dict
        channel(int)
    """
    ch = kwargs.get("channel", self.active_channel)
    self.scpi.set_create_meas(ch, mname, param)
    # a freshly created measurement is not visible until assigned a trace
    self.display_trace(mname)
def display_trace(self, mname, **kwargs):
    """Display measurement name on the analyzer display window.

    Parameters
    ----------
    mname : str
        the name of the measurement, e.g. 'CH1_S11_1'
    kwargs : dict
        channel(int), window_n(int), trace_n(int), display_format(str)

    Keyword Arguments
    -----------------
    display_format : str
        must be one of: MLINear, MLOGarithmic, PHASe, UPHase, IMAGinary, REAL, POLar, SMITh,
        SADMittance, SWR, GDELay, KELVin, FAHRenheit, CELSius
    """
    channel = kwargs.get('channel', self.active_channel)
    window_n = kwargs.get("window_n", '')
    # BUG FIX: dict.get evaluates its default argument eagerly, so the
    # original queried the instrument for existing trace numbers even when
    # the caller explicitly supplied trace_n.  Only query when it is absent.
    trace_n = kwargs.get("trace_n")
    if trace_n is None:
        trace_n = max(self.scpi.query_window_trace_numbers(window_n)) + 1
    display_format = kwargs.get('display_format', 'MLOG')
    self.scpi.set_display_trace(window_n, trace_n, mname)
    self.scpi.set_selected_meas(channel, mname)
    self.scpi.set_display_format(channel, display_format)
def get_meas_list(self, **kwargs):
    """Return the measurement/parameter catalogue as (name, param) pairs.

    If channel is provided to kwargs, then only measurements for that
    channel are queried.

    Parameters
    ----------
    kwargs : dict
        channel : int

    Returns
    -------
    list of tuple or None
        ``[(name, measurement), ...]``, or None when the channel has no
        measurements
    """
    ch = kwargs.get("channel", self.active_channel)
    raw = self.scpi.query_meas_name_list(ch)
    if len(raw) == 1:
        # a single token means the analyzer returned no name/param pairs
        return None
    return [(raw[i], raw[i + 1]) for i in range(0, len(raw) - 1, 2)]
@property
def ntraces(self):
    """Number of measurement traces defined on the active channel.

    Returns
    -------
    int

    Notes
    -----
    This may differ from the number of traces *displayed*: a measurement
    can exist without being associated with a trace.
    """
    numbers = self.scpi.query_meas_number_list(self.active_channel)
    if numbers is None:
        return 0
    return len(numbers)
class PNAX(PNA):
    # Keysight PNA-X: shares the SCPI surface of the base PNA driver, but
    # exposes 4 test ports and up to 32 measurement channels.
    NAME = "Keysight PNA-X"
    NPORTS = 4
    NCHANNELS = 32
| {
"content_hash": "9f960bcd30f95167d8493b284db8805e",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 119,
"avg_line_length": 37.306338028169016,
"alnum_prop": 0.5692307692307692,
"repo_name": "jhillairet/scikit-rf",
"id": "19920420c1e5e5a43f1fcf6c0bbf468e9e9ee778",
"size": "21190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skrf/vi/vna/keysight_pna.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Jupyter Notebook",
"bytes": "6908"
},
{
"name": "Python",
"bytes": "1980974"
},
{
"name": "Shell",
"bytes": "219"
},
{
"name": "TypeScript",
"bytes": "1286336"
}
],
"symlink_target": ""
} |
from magnum.api import hooks
# Server Specific Configurations
server = {
    'port': '8080',
    'host': '0.0.0.0',
}

# Pecan application configuration: controller root, enabled modules,
# the request-hook pipeline, and routes exempt from ACL enforcement.
app = {
    'root': 'magnum.api.controllers.root.RootController',
    'modules': ['magnum.api'],
    'debug': True,
    'hooks': [hooks.ContextHook(), hooks.RPCHook()],
    'acl_public_routes': ['/'],
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
| {
"content_hash": "9851ac2056554e145d17c5a620b91aea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 61,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.6007462686567164,
"repo_name": "LaynePeng/magnum",
"id": "7b4c63f04cbdfb1c00cae4720077a8f4d57b3263",
"size": "1108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/tests/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "1644287"
},
{
"name": "Shell",
"bytes": "24458"
}
],
"symlink_target": ""
} |
__doc__="""
Align selected components, paths (and parts of paths) to the next metric line or guideline above.
"""
import GlyphsApp
Font = Glyphs.font
Doc = Glyphs.currentDocument
Master = Font.selectedFontMaster
allMetrics = [ Master.ascender, Master.capHeight, Master.xHeight, 0.0, Master.descender ] + [ g.y for g in Master.guideLines if g.angle == 0.0 ]
selectedLayer = Font.selectedLayers[0]
try:
selection = selectedLayer.selection()
if selection:
try:
highestPathY = max( n.y for n in selection if type(n) == GSNode )
except:
# No path selected
highestPathY = None
try:
highestCompY = max( (c.bounds.origin.y + c.bounds.size.height) for c in selection if type(c) == GSComponent )
except:
# No component selected
highestCompY = None
highestY = max( y for y in [highestCompY, highestPathY] if y != None )
try:
nextMetricLineAbove = min( ( m for m in allMetrics if m > highestY ) )
except:
nextMetricLineAbove = max( allMetrics )
Font.disableUpdateInterface()
for thisThing in selection:
thisType = type(thisThing)
if thisType == GSNode or thisType == GSComponent:
thisThing.y += ( nextMetricLineAbove - highestY )
Font.enableUpdateInterface()
except Exception, e:
print "Error. Cannot bump selection:"
print selection
print e
| {
"content_hash": "0d6c91b7c733fa0c9e162c38fd75f19c",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 144,
"avg_line_length": 27.229166666666668,
"alnum_prop": 0.7061973986228003,
"repo_name": "weiweihuanghuang/Glyphs-Scripts",
"id": "97c7e35bcb6fcd3bb47df99d0932f9b7572cdc8d",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Paths/Align to Metrics/Bump up.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "316614"
}
],
"symlink_target": ""
} |
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers, operations_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
import grpc # type: ignore
from google.cloud.assuredworkloads_v1beta1.types import assuredworkloads
from .base import DEFAULT_CLIENT_INFO, AssuredWorkloadsServiceTransport
class AssuredWorkloadsServiceGrpcTransport(AssuredWorkloadsServiceTransport):
    """gRPC backend transport for AssuredWorkloadsService.

    Service to manage AssuredWorkloads.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of RPC stub callables, keyed by method name; populated lazily
    # by the method properties below.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "assuredworkloads.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(
        cls,
        host: str = "assuredworkloads.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)

        # Return the client from cache.
        return self._operations_client

    @property
    def create_workload(
        self,
    ) -> Callable[[assuredworkloads.CreateWorkloadRequest], operations_pb2.Operation]:
        r"""Return a callable for the create workload method over gRPC.

        Creates Assured Workload.

        Returns:
            Callable[[~.CreateWorkloadRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_workload" not in self._stubs:
            self._stubs["create_workload"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/CreateWorkload",
                request_serializer=assuredworkloads.CreateWorkloadRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["create_workload"]

    @property
    def update_workload(
        self,
    ) -> Callable[[assuredworkloads.UpdateWorkloadRequest], assuredworkloads.Workload]:
        r"""Return a callable for the update workload method over gRPC.

        Updates an existing workload. Currently allows updating of
        workload display_name and labels. For force updates don't set
        etag field in the Workload. Only one update operation per
        workload can be in progress.

        Returns:
            Callable[[~.UpdateWorkloadRequest],
                    ~.Workload]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_workload" not in self._stubs:
            self._stubs["update_workload"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/UpdateWorkload",
                request_serializer=assuredworkloads.UpdateWorkloadRequest.serialize,
                response_deserializer=assuredworkloads.Workload.deserialize,
            )
        return self._stubs["update_workload"]

    @property
    def restrict_allowed_resources(
        self,
    ) -> Callable[
        [assuredworkloads.RestrictAllowedResourcesRequest],
        assuredworkloads.RestrictAllowedResourcesResponse,
    ]:
        r"""Return a callable for the restrict allowed resources method over gRPC.

        Restrict the list of resources allowed in the
        Workload environment. The current list of allowed
        products can be found at
        https://cloud.google.com/assured-workloads/docs/supported-products
        In addition to assuredworkloads.workload.update
        permission, the user should also have
        orgpolicy.policy.set permission on the folder resource
        to use this functionality.

        Returns:
            Callable[[~.RestrictAllowedResourcesRequest],
                    ~.RestrictAllowedResourcesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "restrict_allowed_resources" not in self._stubs:
            self._stubs["restrict_allowed_resources"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/RestrictAllowedResources",
                request_serializer=assuredworkloads.RestrictAllowedResourcesRequest.serialize,
                response_deserializer=assuredworkloads.RestrictAllowedResourcesResponse.deserialize,
            )
        return self._stubs["restrict_allowed_resources"]

    @property
    def delete_workload(
        self,
    ) -> Callable[[assuredworkloads.DeleteWorkloadRequest], empty_pb2.Empty]:
        r"""Return a callable for the delete workload method over gRPC.

        Deletes the workload. Make sure that workload's direct children
        are already in a deleted state, otherwise the request will fail
        with a FAILED_PRECONDITION error. In addition to
        assuredworkloads.workload.delete permission, the user should
        also have orgpolicy.policy.set permission on the deleted folder
        to remove Assured Workloads OrgPolicies.

        Returns:
            Callable[[~.DeleteWorkloadRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_workload" not in self._stubs:
            self._stubs["delete_workload"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/DeleteWorkload",
                request_serializer=assuredworkloads.DeleteWorkloadRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_workload"]

    @property
    def get_workload(
        self,
    ) -> Callable[[assuredworkloads.GetWorkloadRequest], assuredworkloads.Workload]:
        r"""Return a callable for the get workload method over gRPC.

        Gets Assured Workload associated with a CRM Node

        Returns:
            Callable[[~.GetWorkloadRequest],
                    ~.Workload]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_workload" not in self._stubs:
            self._stubs["get_workload"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/GetWorkload",
                request_serializer=assuredworkloads.GetWorkloadRequest.serialize,
                response_deserializer=assuredworkloads.Workload.deserialize,
            )
        return self._stubs["get_workload"]

    @property
    def analyze_workload_move(
        self,
    ) -> Callable[
        [assuredworkloads.AnalyzeWorkloadMoveRequest],
        assuredworkloads.AnalyzeWorkloadMoveResponse,
    ]:
        r"""Return a callable for the analyze workload move method over gRPC.

        Analyze if the source Assured Workloads can be moved
        to the target Assured Workload

        Returns:
            Callable[[~.AnalyzeWorkloadMoveRequest],
                    ~.AnalyzeWorkloadMoveResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "analyze_workload_move" not in self._stubs:
            self._stubs["analyze_workload_move"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/AnalyzeWorkloadMove",
                request_serializer=assuredworkloads.AnalyzeWorkloadMoveRequest.serialize,
                response_deserializer=assuredworkloads.AnalyzeWorkloadMoveResponse.deserialize,
            )
        return self._stubs["analyze_workload_move"]

    @property
    def list_workloads(
        self,
    ) -> Callable[
        [assuredworkloads.ListWorkloadsRequest], assuredworkloads.ListWorkloadsResponse
    ]:
        r"""Return a callable for the list workloads method over gRPC.

        Lists Assured Workloads under a CRM Node.

        Returns:
            Callable[[~.ListWorkloadsRequest],
                    ~.ListWorkloadsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_workloads" not in self._stubs:
            self._stubs["list_workloads"] = self.grpc_channel.unary_unary(
                "/google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService/ListWorkloads",
                request_serializer=assuredworkloads.ListWorkloadsRequest.serialize,
                response_deserializer=assuredworkloads.ListWorkloadsResponse.deserialize,
            )
        return self._stubs["list_workloads"]

    def close(self):
        """Close the underlying gRPC channel."""
        self.grpc_channel.close()

    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]

    @property
    def kind(self) -> str:
        """Transport kind identifier used by the client factory."""
        return "grpc"
__all__ = ("AssuredWorkloadsServiceGrpcTransport",)
| {
"content_hash": "569dfa0e7e4f7a5d9ed61ed13e6f76d6",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 106,
"avg_line_length": 45.12008281573499,
"alnum_prop": 0.6308906529619602,
"repo_name": "googleapis/python-assured-workloads",
"id": "ea872e6c1df958d22f7b57bde6bbfe0b00f18007",
"size": "22393",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/assuredworkloads_v1beta1/services/assured_workloads_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "768919"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
"""Test Taint."""
import unittest
from triton import *
class TestTaint(unittest.TestCase):
"""Testing the taint engine."""
def test_known_issues(self):
    """Regression: taint must survive an LEA that reads the tainted register."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86)
    ctx.taintRegister(ctx.registers.eax)
    inst = Instruction()
    # lea eax,[esi+eax*1]
    inst.setOpcode(b"\x8D\x04\x06")
    ctx.processing(inst)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.eax))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.ebx))
def test_taint_memory(self):
    """Taint/untaint of single addresses and multi-byte ranges."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)

    # everything starts untainted
    self.assertFalse(ctx.isMemoryTainted(0x1000))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))

    ctx.taintMemory(0x1000)
    ctx.taintMemory(MemoryAccess(0x2000, 4))

    self.assertTrue(ctx.isMemoryTainted(0x1000))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))
    # any access overlapping the tainted 4-byte range reports tainted
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 2)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2001, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2002, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2003, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2002, 2)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2003, 2)))
    # neighbours just outside the range stay clean
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x1fff, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2004, 1)))
    self.assertFalse(ctx.isMemoryTainted(0x1001))
    self.assertFalse(ctx.isMemoryTainted(0x0fff))

    ctx.untaintMemory(0x1000)
    ctx.untaintMemory(MemoryAccess(0x2000, 4))

    # after untainting, every previously-tainted view reads clean
    self.assertFalse(ctx.isMemoryTainted(0x1000))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 2)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2001, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2002, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2003, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2002, 2)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2003, 2)))
def test_taint_register(self):
    """Taint state is shared between overlapping register views."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)

    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.untaintRegister(ctx.registers.rax)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))

    # tainting a sub-register (ah) taints every wider alias of it
    ctx.taintRegister(ctx.registers.ah)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.eax))
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.ax))

    # and untainting it clears the aliases again
    ctx.untaintRegister(ctx.registers.ah)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.eax))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.ax))
def test_taint_assignement_memory_immediate(self):
    """Assigning an (always untainted) immediate clears overlapped bytes."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)

    # exact overlap clears the byte
    ctx.taintMemory(0x1000)
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    ctx.taintAssignment(MemoryAccess(0x1000, 1), Immediate(1, 1))
    self.assertFalse(ctx.isMemoryTainted(0x1000))

    # partial overlap (access ends on the tainted byte) also clears it
    ctx.taintMemory(0x1000)
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    ctx.taintAssignment(MemoryAccess(0x0fff, 2), Immediate(1, 2))
    self.assertFalse(ctx.isMemoryTainted(0x1000))

    # no overlap leaves the taint untouched
    ctx.taintMemory(0x1000)
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    ctx.taintAssignment(MemoryAccess(0x0ffe, 2), Immediate(1, 2))
    self.assertTrue(ctx.isMemoryTainted(0x1000))

    # within a tainted 4-byte range, only the assigned byte is cleared
    ctx.taintMemory(MemoryAccess(0x1000, 4))
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    self.assertTrue(ctx.isMemoryTainted(0x1001))
    self.assertTrue(ctx.isMemoryTainted(0x1002))
    self.assertTrue(ctx.isMemoryTainted(0x1003))
    self.assertFalse(ctx.isMemoryTainted(0x1004))
    ctx.taintAssignment(MemoryAccess(0x1001, 1), Immediate(1, 1))
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    self.assertFalse(ctx.isMemoryTainted(0x1001))
    self.assertTrue(ctx.isMemoryTainted(0x1002))
    self.assertTrue(ctx.isMemoryTainted(0x1003))

    # a full-width assignment clears the whole range
    ctx.taintAssignment(MemoryAccess(0x1000, 4), Immediate(1, 4))
    self.assertFalse(ctx.isMemoryTainted(0x1000))
    self.assertFalse(ctx.isMemoryTainted(0x1001))
    self.assertFalse(ctx.isMemoryTainted(0x1002))
    self.assertFalse(ctx.isMemoryTainted(0x1003))
def test_taint_assignement_memory_memory(self):
    """Check tainting assignment memory <- memory."""
    Triton = TritonContext()
    Triton.setArchitecture(ARCH.X86_64)

    # assignment from a tainted source taints the destination
    Triton.taintMemory(MemoryAccess(0x2000, 1))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
    Triton.taintAssignment(MemoryAccess(0x1000, 1), MemoryAccess(0x2000, 1))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x1000, 1)))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

    # assignment from an untainted source clears the destination
    Triton.taintAssignment(MemoryAccess(0x1000, 1), MemoryAccess(0x3000, 1))
    Triton.taintAssignment(MemoryAccess(0x2000, 1), MemoryAccess(0x3000, 1))
    self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x1000, 1)))
    self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))

    # a 2-byte untainted assignment at 0x2001 clears only bytes
    # 0x2001-0x2002; the surrounding bytes 0x2000 and 0x2003 stay tainted
    Triton.taintMemory(MemoryAccess(0x2000, 4))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 4)))
    Triton.taintAssignment(MemoryAccess(0x2001, 2), MemoryAccess(0x3000, 1))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2000, 1)))
    self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2001, 1)))
    # BUG FIX: the original repeated the 0x2001/0x2000 checks verbatim
    # (copy-paste); the intent was clearly to cover 0x2002 and 0x2003.
    self.assertFalse(Triton.isMemoryTainted(MemoryAccess(0x2002, 1)))
    self.assertTrue(Triton.isMemoryTainted(MemoryAccess(0x2003, 1)))
def test_taint_assignement_memory_register(self):
    """Assigning an untainted register clears exactly the overlapped bytes."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)

    # a 2-byte register (ax) written at 0x2002 clears 0x2002-0x2003 only
    ctx.taintMemory(MemoryAccess(0x2000, 8))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 8)))
    ctx.taintAssignment(MemoryAccess(0x2002, 2), ctx.registers.ax)
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2001, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2002, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2003, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2004, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2005, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2006, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2007, 1)))

    # an 8-byte register (rax) written at 0x1fff clears 0x1fff-0x2006,
    # leaving only the last byte of the original range tainted
    ctx.taintMemory(MemoryAccess(0x2000, 8))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 8)))
    ctx.taintAssignment(MemoryAccess(0x1fff, 8), ctx.registers.rax)
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x1fff, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2001, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2002, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2003, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2004, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2005, 1)))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2006, 1)))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2007, 1)))
def test_taint_assignement_register_immediate(self):
    """Taint assignment of the form register <- immediate."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    # An immediate never carries taint, so the assignment clears RAX.
    ctx.taintAssignment(ctx.registers.rax, Immediate(1, 8))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_assignement_register_memory(self):
    """Taint assignment of the form register <- memory."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    # Untainted source memory clears the destination register.
    ctx.taintAssignment(ctx.registers.rax, MemoryAccess(0x2000, 8))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    # Tainted source memory taints the destination register.
    ctx.taintMemory(MemoryAccess(0x2000, 8))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 8)))
    ctx.taintAssignment(ctx.registers.rax, MemoryAccess(0x2000, 8))
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    # A different, untainted location clears it again.
    ctx.taintAssignment(ctx.registers.rax, MemoryAccess(0x3000, 8))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_assignement_register_register(self):
    """Taint assignment of the form register <- register."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    # Self-assignment preserves the current taint state.
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintAssignment(ctx.registers.rax, ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.untaintRegister(ctx.registers.rax)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintAssignment(ctx.registers.rax, ctx.registers.rax)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    # Assignment from a tainted register spreads the taint.
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rbx))
    ctx.taintRegister(ctx.registers.rbx)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rbx))
    ctx.taintAssignment(ctx.registers.rax, ctx.registers.rbx)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_union_memory_immediate(self):
    """Taint union of the form memory U immediate."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    ctx.taintMemory(MemoryAccess(0x2000, 4))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))
    # Immediates carry no taint: the union leaves the memory tainted.
    ctx.taintUnion(MemoryAccess(0x2000, 4), Immediate(1, 4))
    self.assertTrue(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))
    ctx.untaintMemory(MemoryAccess(0x2000, 4))
    self.assertFalse(ctx.isMemoryTainted(MemoryAccess(0x2000, 4)))
def test_taint_union_memory_memory(self):
    """Taint union of the form memory U memory."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    dst = MemoryAccess(0x2000, 4)
    src = MemoryAccess(0x3000, 4)
    # T U !T -> destination stays tainted, source stays clean.
    ctx.taintMemory(dst)
    self.assertTrue(ctx.isMemoryTainted(dst))
    ctx.taintUnion(dst, src)
    self.assertTrue(ctx.isMemoryTainted(dst))
    self.assertFalse(ctx.isMemoryTainted(src))
    # !T U !T -> both stay clean.
    ctx.untaintMemory(dst)
    self.assertFalse(ctx.isMemoryTainted(dst))
    ctx.taintUnion(dst, src)
    self.assertFalse(ctx.isMemoryTainted(dst))
    self.assertFalse(ctx.isMemoryTainted(src))
    # !T U T -> the source taint propagates to the destination.
    ctx.taintMemory(src)
    ctx.taintUnion(dst, src)
    self.assertTrue(ctx.isMemoryTainted(dst))
    self.assertTrue(ctx.isMemoryTainted(src))
def test_taint_union_memory_register(self):
    """Taint union of the form memory U register."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    mem = MemoryAccess(0x2000, 4)
    # T U !T -> memory stays tainted, register stays clean.
    ctx.taintMemory(mem)
    self.assertTrue(ctx.isMemoryTainted(mem))
    ctx.taintUnion(mem, ctx.registers.rax)
    self.assertTrue(ctx.isMemoryTainted(mem))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    # !T U !T -> both stay clean.
    ctx.untaintMemory(mem)
    self.assertFalse(ctx.isMemoryTainted(mem))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintUnion(mem, ctx.registers.rax)
    self.assertFalse(ctx.isMemoryTainted(mem))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    # !T U T -> the register taint spreads to the memory.
    ctx.taintRegister(ctx.registers.rax)
    ctx.taintUnion(mem, ctx.registers.rax)
    self.assertTrue(ctx.isMemoryTainted(mem))
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_union_register_immediate(self):
    """Taint union of the form register U immediate."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    # The union with an (always clean) immediate never changes the state.
    ctx.taintUnion(ctx.registers.rax, Immediate(1, 8))
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.untaintRegister(ctx.registers.rax)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintUnion(ctx.registers.rax, Immediate(1, 8))
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_union_register_memory(self):
    """Taint union of the form register U memory."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    mem = MemoryAccess(0x2000, 4)
    # T U !T -> register stays tainted, memory stays clean.
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintRegister(ctx.registers.rax)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintUnion(ctx.registers.rax, mem)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertFalse(ctx.isMemoryTainted(mem))
    # !T U !T -> both stay clean.
    ctx.untaintRegister(ctx.registers.rax)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.taintUnion(ctx.registers.rax, mem)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertFalse(ctx.isMemoryTainted(mem))
    # !T U T -> taint flows from the memory into the register.
    ctx.untaintRegister(ctx.registers.rax)
    ctx.taintMemory(mem)
    ctx.taintUnion(ctx.registers.rax, mem)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertTrue(ctx.isMemoryTainted(mem))
    # T U T -> both remain tainted.
    ctx.taintRegister(ctx.registers.rax)
    ctx.taintMemory(mem)
    ctx.taintUnion(ctx.registers.rax, mem)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    self.assertTrue(ctx.isMemoryTainted(mem))
def test_taint_union_register_register(self):
    """Taint union of the form register U register."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    rax = ctx.registers.rax
    rbx = ctx.registers.rbx
    # T U !T -> RAX keeps its taint, RBX stays clean.
    self.assertFalse(ctx.isRegisterTainted(rax))
    ctx.taintRegister(rax)
    self.assertTrue(ctx.isRegisterTainted(rax))
    ctx.taintUnion(rax, rbx)
    self.assertTrue(ctx.isRegisterTainted(rax))
    self.assertFalse(ctx.isRegisterTainted(rbx))
    # T U T -> both tainted.
    ctx.taintRegister(rbx)
    self.assertTrue(ctx.isRegisterTainted(rbx))
    ctx.taintUnion(rax, rbx)
    self.assertTrue(ctx.isRegisterTainted(rax))
    self.assertTrue(ctx.isRegisterTainted(rbx))
    # !T U T -> taint flows into RAX.
    ctx.untaintRegister(rax)
    ctx.taintRegister(rbx)
    ctx.taintUnion(rax, rbx)
    self.assertTrue(ctx.isRegisterTainted(rax))
    self.assertTrue(ctx.isRegisterTainted(rbx))
    # !T U !T -> both stay clean.
    ctx.untaintRegister(rax)
    ctx.untaintRegister(rbx)
    ctx.taintUnion(rax, rbx)
    self.assertFalse(ctx.isRegisterTainted(rax))
    self.assertFalse(ctx.isRegisterTainted(rbx))
def test_taint_get_tainted_registers(self):
    """Enumerate the tainted registers."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertEqual(len(ctx.getTaintedRegisters()), 0)
    # Tainting a sub-register marks its parent register as tainted.
    for reg in (ctx.registers.eax, ctx.registers.ax, ctx.registers.rbx,
                ctx.registers.cl, ctx.registers.di):
        ctx.taintRegister(reg)
    tainted = ctx.getTaintedRegisters()
    for parent in (ctx.registers.rax, ctx.registers.rbx,
                   ctx.registers.rcx, ctx.registers.rdi):
        self.assertIn(parent, tainted)
def test_taint_get_tainted_memory(self):
    """Enumerate the tainted memory addresses."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertEqual(len(ctx.getTaintedMemory()), 0)
    ctx.taintMemory(0x1000)
    ctx.taintMemory(0x2000)
    ctx.taintMemory(0x3000)
    # A 4-byte access taints each covered byte individually.
    ctx.taintMemory(MemoryAccess(0x4000, 4))
    tainted = ctx.getTaintedMemory()
    for addr in (0x1000, 0x2000, 0x3000, 0x4000, 0x4001, 0x4002, 0x4003):
        self.assertIn(addr, tainted)
    self.assertNotIn(0x5000, tainted)
def test_taint_set_register(self):
    """Toggle register taint explicitly through setTaintRegister."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.setTaintRegister(ctx.registers.rax, True)
    self.assertTrue(ctx.isRegisterTainted(ctx.registers.rax))
    ctx.setTaintRegister(ctx.registers.rax, False)
    self.assertFalse(ctx.isRegisterTainted(ctx.registers.rax))
def test_taint_set_memory(self):
    """Toggle memory taint explicitly through setTaintMemory."""
    ctx = TritonContext()
    ctx.setArchitecture(ARCH.X86_64)
    self.assertFalse(ctx.isMemoryTainted(0x1000))
    ctx.setTaintMemory(MemoryAccess(0x1000, 1), True)
    self.assertTrue(ctx.isMemoryTainted(0x1000))
    ctx.setTaintMemory(MemoryAccess(0x1000, 1), False)
    self.assertFalse(ctx.isMemoryTainted(0x1000))
def test_taint_through_pointers(self):
    """Check taint propagation through pointer dereferences.

    Each scenario runs on a fresh context: enable or disable
    MODE.TAINT_THROUGH_POINTERS, taint one register or one memory
    byte, process a single instruction and check the taint state of
    the sink operand. The original copy-pasted scenarios are folded
    into a case table.
    """
    MOVZX_RBX_PTR_RAX = b"\x48\x0F\xB6\x18"  # movzx rbx, byte ptr [rax]
    MOV_PTR_RAX_RBX = b"\x48\x89\x18"        # mov [rax], rbx
    XOR_PTR_RAX_RBX = b"\x48\x31\x18"        # xor [rax], rbx
    XOR_RBX_PTR_RAX = b"\x48\x33\x18"        # xor rbx, [rax]

    def run(ttp, source, opcode, address, sink, expected):
        # One fresh context per scenario so taint states never leak.
        ctx = TritonContext()
        ctx.setArchitecture(ARCH.X86_64)
        ctx.setMode(MODE.TAINT_THROUGH_POINTERS, ttp)
        kind, where = source
        if kind == "reg":
            reg = getattr(ctx.registers, where)
            ctx.taintRegister(reg)
            self.assertTrue(ctx.isRegisterTainted(reg))
        else:
            ctx.taintMemory(where)
        inst = Instruction(opcode)
        inst.setAddress(address)
        ctx.processing(inst)
        kind, where = sink
        if kind == "reg":
            actual = ctx.isRegisterTainted(getattr(ctx.registers, where))
        else:
            actual = ctx.isMemoryTainted(where)
        self.assertEqual(actual, expected)

    cases = [
        # (mode on?, tainted source, opcode, address, sink, sink tainted?)
        (False, ("reg", "rax"), MOVZX_RBX_PTR_RAX, 0x0, ("reg", "rbx"), False),
        (True,  ("reg", "rax"), MOVZX_RBX_PTR_RAX, 0x0, ("reg", "rbx"), True),
        (True,  ("reg", "rax"), MOV_PTR_RAX_RBX, 0x1000, ("mem", 0), False),
        (True,  ("reg", "rbx"), MOV_PTR_RAX_RBX, 0x1000, ("mem", 0), True),
        (True,  ("reg", "rax"), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), False),
        (True,  ("reg", "rbx"), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), True),
        (True,  ("mem", 0), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), True),
        (True,  ("mem", 0), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), True),
        (True,  ("reg", "rax"), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), True),
        (True,  ("reg", "rbx"), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), True),
        (False, ("reg", "rax"), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), False),
        (False, ("reg", "rbx"), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), True),
        (False, ("mem", 0), XOR_PTR_RAX_RBX, 0x1000, ("mem", 0), True),
        (False, ("mem", 0), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), True),
        (False, ("reg", "rax"), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), False),
        (False, ("reg", "rbx"), XOR_RBX_PTR_RAX, 0x1000, ("reg", "rbx"), True),
    ]
    for case in cases:
        run(*case)
| {
"content_hash": "d2e5322ce219e3af8a5a2c20186cbb66",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 80,
"avg_line_length": 40.25854383358098,
"alnum_prop": 0.6895253561674172,
"repo_name": "JonathanSalwan/Triton",
"id": "5b29bf5b1c298f100e87dba80a75f4b446feba36",
"size": "27133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/testers/unittests/test_taint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "245885"
},
{
"name": "C++",
"bytes": "6814122"
},
{
"name": "CMake",
"bytes": "41282"
},
{
"name": "Dockerfile",
"bytes": "2713"
},
{
"name": "Makefile",
"bytes": "833"
},
{
"name": "Python",
"bytes": "1766960"
},
{
"name": "Ruby",
"bytes": "148185"
},
{
"name": "SMT",
"bytes": "3094"
},
{
"name": "Shell",
"bytes": "4009"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import sys
import os
import logging
import argparse
import multiprocessing
from logging.config import dictConfig
import numpy as np
from future.utils import raise_
from sqlalchemy import create_engine
from sqlalchemy.orm import joinedload
from progressbar import (ProgressBar, Timer, SimpleProgress, Bar, Percentage, ETA)
import pyorganism as pyorg
import pyorganism.regulation as pyreg
import pyorganism.io.models as pymodels
import pyorganism.io.microarray as pymicro
LOGGER = logging.getLogger()
LOGGER.addHandler(logging.StreamHandler())
def check_path(path):
    """Raise an IOError if *path* does not name an existing regular file."""
    if os.path.isfile(path):
        return
    raise IOError("file does not exist '{path}'".format(path=path))
def string_pair(key, value, level, sep=": "):
    """Join *key* and *value* with *sep*, right-justified to *level* chars."""
    text = "{0}{1}{2}".format(str(key), sep, str(value))
    return text.rjust(level)
def print_dict(dic, level=0):
    """Render *dic* recursively as a list of indented "key: value" lines.

    Nested dicts are bracketed by '{'/'}' marker lines (indented two
    more spaces), lists are abbreviated as "[...]" and sets as "{...}".
    """
    message = list()
    # dict.items() works on both Python 2 and 3; the original
    # iteritems() exists only on Python 2 although this module
    # otherwise targets 2/3 compatibility (__future__, future.utils).
    for (key, value) in dic.items():
        if isinstance(value, dict):
            message.append(string_pair(key, "{", level))
            message.extend(print_dict(value, level + 2))
            message.append(string_pair("", "}", level, sep=""))
        elif isinstance(value, list):
            message.append(string_pair(key, "[...]", level))
        elif isinstance(value, set):
            message.append(string_pair(key, "{...}", level))
        else:
            message.append(string_pair(key, value, level))
    return message
##############################################################################
# Discrete
##############################################################################
def load_discrete(organism, config):
    """Read every configured expression data set into ``organism.activity``."""
    LOGGER.info("Loading differentially expressed genes:")
    experiments = config["experiments"]
    specs = zip(experiments["paths"], experiments["names"],
            experiments["readers"], experiments["args"])
    for (filename, name, reader, extra) in specs:
        # Resolve the reader function by name on the microarray module.
        reader_func = getattr(pymicro, reader)
        path = os.path.join(experiments["base"], filename)
        check_path(path)
        LOGGER.info(" %s: '%s'", name, path)
        organism.activity[name] = reader_func(path)
def simple_discrete(control_type, df, name2gene):
    """Map expressed gene names to gene objects and derive active units."""
    eligible = df["name"].notnull()
    active = [name2gene[name] for name in df["name"][eligible]
              if name2gene[name] is not None]
    LOGGER.info(" mapped %d/%d active genes (%.2f%%)", len(active), len(df),
            len(active) / len(df) * 100.0)
    # Digital control additionally pulls in the active transcription factors.
    if control_type == "digital":
        genes = pyreg.active_genes_and_tf(active)
    else:
        genes = active
    return {
        "gene": genes,
        "tu": pyreg.active_tu(active),
        "operon": pyreg.active_operons(active),
    }
def ratio_discrete(control_type, df, name2gene):
    """Split genes into up-/down-regulated sets by their expression ratio."""
    eligible = df["name"].notnull()
    up = (df["ratio (A/B)"] > 1.0) & eligible
    down = (df["ratio (A/B)"] < 1.0) & eligible
    up_reg = [name2gene[name] for name in df["name"][up]
              if name2gene[name] is not None]
    total = up.sum()
    LOGGER.info(" mapped %d/%d up-regulated genes (%.2f%%)", len(up_reg), total,
            len(up_reg) / total * 100.0)
    down_reg = [name2gene[name] for name in df["name"][down]
                if name2gene[name] is not None]
    total = down.sum()
    LOGGER.info(" mapped %d/%d down-regulated genes (%.2f%%)", len(down_reg),
            total, len(down_reg) / total * 100.0)
    # The result mirrors simple_discrete but keeps both directions apart.
    return {
        "gene": {"up": up_reg, "down": down_reg},
        "tu": {"up": pyreg.active_tu(up_reg),
               "down": pyreg.active_tu(down_reg)},
        "operon": {"up": pyreg.active_operons(up_reg),
                   "down": pyreg.active_operons(down_reg)},
    }
def discrete_jobs(organism, config, *args):
    """Expand the analysis configuration into a list of job specifications.

    One spec dict is produced per combination of version, control type,
    CTC method, network projection, measure and experiment; experiments
    with a "ratio_discrete" setup fan out into an 'up' and a 'down' job.
    The original duplicated the twelve common spec assignments in both
    branches; they are built once here.
    """
    LOGGER.info("Generating discrete job specifications:")
    jobs = list()
    analysis = config["analysis"]
    for version in config["versions"]:
        combos = zip(analysis["control_types"],
                analysis["experimental_sets"], analysis["experimental_setups"],
                analysis["control"], analysis["ctc"],
                analysis["measures"], analysis["random_num"],
                analysis["robustness_num"], analysis["robustness_args"],
                config["network"]["projections"])
        for (cntrl_name, experiments, setups, control, ctc, measures,
                random_num, robustness_num, rob_extra, projections) in combos:
            for method in ctc:
                for basis in projections:
                    for ms_name in measures:
                        for (exp_name, exp_setup) in zip(experiments, setups):
                            base = {
                                "version": version,
                                "continuous": config["continuous"],
                                "control_type": cntrl_name,
                                "experiment": exp_name,
                                "projection": basis,
                                "setup": exp_setup,
                                "control": control,
                                "ctc": method,
                                "measure": ms_name,
                                "random_num": random_num,
                                "robustness_num": robustness_num,
                                "robustness_args": rob_extra,
                            }
                            if exp_setup == "ratio_discrete":
                                # One job per regulation direction.
                                for direction in ("up", "down"):
                                    spec = dict(base)
                                    spec["direction"] = direction
                                    jobs.append(spec)
                            else:
                                jobs.append(base)
    LOGGER.info(" %d jobs", len(jobs))
    return jobs
def discrete_worker(spec):
    """Run one discrete control/CTC computation described by *spec*.

    NOTE(review): the "networks" and "prepared" module globals are
    expected to be populated elsewhere (not visible in this chunk);
    worker processes read the shared data through globals().
    """
    LOGGER.debug(spec)
    version = spec["version"]
    cntrl_type = spec["control_type"]
    global_vars = globals()
    # Resolve the configured callables by name on pyorganism.regulation.
    control = getattr(pyreg, spec["control"])
    ctc = getattr(pyreg, spec["ctc"])
    measure = getattr(pyreg, spec["measure"])
    net = global_vars["networks"][version][cntrl_type][spec["projection"]]
    if cntrl_type == "analog":
        # Analog specs carry a "direction" key (see discrete_jobs) that
        # selects the up- or down-regulated gene set.
        active = global_vars["prepared"][version][cntrl_type][spec["experiment"]][spec["projection"]][spec["direction"]]
    else:
        active = global_vars["prepared"][version][cntrl_type][spec["experiment"]][spec["projection"]]
    LOGGER.debug(len(active))
    # Restrict the network to the active elements, then score it.
    effective = pyreg.effective_network(net, active)
    res_cntrl = control(effective, measure=measure)
    (res_ctc, samples) = ctc(effective, net, measure=measure,
            random_num=spec["random_num"], return_sample=True)
    return (spec, res_cntrl, res_ctc, samples)
def discrete_result(manager, spec, res_cntrl, res_ctc, samples):
    """Store one discrete-analysis outcome on the result *manager*."""
    record = {
        "version": spec["version"],
        "control_type": spec["control_type"],
        "continuous": spec["continuous"],
        "strain": spec["experiment"],
        "projection": spec["projection"],
        "setup": spec["setup"],
        "control_strength": res_cntrl,
        "control_method": spec["control"],
        "ctc": res_ctc,
        "ctc_method": spec["ctc"],
        "measure": spec["measure"],
        # Only ratio-type specs carry a direction.
        "direction": spec.get("direction", None),
        "samples": samples,
    }
    manager.append(**record)
def main_discrete(args):
    """Placeholder driver for the discrete analysis (no jobs run yet)."""
    tasks = args[0]
    widgets = [Timer(), " ", SimpleProgress(), " ", Percentage(), " ",
               Bar(), " ", ETA()]
    bar = ProgressBar(maxval=len(tasks), widgets=widgets).start()
    # Nothing is executed; the bar is closed immediately.
    bar.finish()
##############################################################################
# Continuous
##############################################################################
def normed(session, experiment):
    """Load the expression series and rescale each feature row to [0, 1]."""
    frame = pymodels.Expression.load_frame(session, experiment)
    return frame.apply(pyorg.norm_zero2unity, axis=1, raw=True)
def shuffle_feature(session, experiment):
    """Return the normalized series with whole rows shuffled in place.

    Rows are permuted relative to the index, i.e. each feature keeps its
    values together but is assigned to a random feature label.
    """
    frame = normed(session, experiment)
    np.random.shuffle(frame.values)
    return frame
def shuffle_series(session, experiment):
    """Return the normalized series with whole columns shuffled in place.

    Shuffling the transposed value array permutes columns relative to
    their names (the time points).
    """
    frame = normed(session, experiment)
    np.random.shuffle(frame.values.T)
    return frame
def shuffle_all(session, experiment):
    """Return the normalized series with every single value reshuffled.

    The flat iterator walks all cells of the 2D array, destroying both
    the feature and the time-point structure.
    """
    frame = normed(session, experiment)
    np.random.shuffle(frame.values.flat)
    return frame
def continuous_exec(args):
    """Run one continuous-control job and return its id, scores and samples."""
    (control, points, measure, random_num, delay, job_id) = args
    # Comparison measures relate consecutive time points, so the first
    # point has no score of its own.
    if "comparison" in measure:
        points = points[1:]
    (z_scores, ctrl_scores, samples) = control.series_ctc(
            measure, random_num, delay)
    return (job_id, z_scores, ctrl_scores, samples, points)
def main_continuous(args):
    """Run all incomplete continuous-control jobs stored in the database.

    Phase 1 builds one ContinuousControl per unique combination of
    (analysis, control, experiment, preparation, sampling, projection);
    phase 2 executes the jobs in a multiprocessing pool and writes the
    resulting scores (plus an optional random-sample selection) back.
    """
    glbls = globals()
    engine = create_engine(args.engine)
    pymodels.Base.metadata.bind = engine
    pymodels.Session.configure(bind=engine)
    session = pymodels.Session()
    # Eagerly load the related configuration rows of every unfinished job.
    tasks = session.query(pymodels.Job).\
            options(joinedload("analysis"), joinedload("control"),
            joinedload("experiment")).filter(~pymodels.Job.complete).all()
    if len(tasks) == 0:
        LOGGER.warn("Nothing to do")
        return
    # Unique configuration axes across all pending jobs.
    analysis_configs = {job.analysis for job in tasks}
    control_configs = {job.control for job in tasks}
    experiments = {job.experiment for job in tasks}
    preparations = {job.preparation for job in tasks}
    sampling = {job.sampling for job in tasks}
    projections = {job.projection for job in tasks}
    LOGGER.debug("%d analysis configurations", len(analysis_configs))
    LOGGER.debug("%d control configurations", len(control_configs))
    LOGGER.debug("%d experiments", len(experiments))
    LOGGER.debug("%d setup cases", len(preparations))
    LOGGER.debug("%d sampling methods", len(sampling))
    LOGGER.debug("%d network projections", len(projections))
    num_prep = len(analysis_configs) * len(control_configs) * len(experiments)\
            * len(preparations) * len(sampling) * len(projections)
    LOGGER.debug("%d total configurations", num_prep)
    LOGGER.info("Preparing Data")
    task_args = dict()
    bar = ProgressBar(maxval=num_prep, widgets=[Timer(), " ",
            SimpleProgress(), " ", Percentage(), " ", Bar(), " ",
            ETA()]).start()
    for anal in analysis_configs:
        LOGGER.debug(" %s:", anal.version)
        feature2node = pyorg.read_pickle(os.path.join(anal.objects, anal.map))
        for cntrl in control_configs:
            LOGGER.debug(" %s", cntrl.type)
            net = pyorg.read_pickle(os.path.join(anal.objects, cntrl.network))
            # Pre-compute the alternative network projections once.
            tu_net = pyreg.to_transcription_unit_based(net)
            op_net = pyreg.to_operon_based(net)
            for exp in experiments:
                LOGGER.debug(" %s", exp.strain)
                for prep in preparations:
                    LOGGER.debug(" %s", prep)
                    # 'prep' names one of the module-level preparation
                    # functions (normed, shuffle_feature, ...).
                    series = glbls[prep](session, exp)
                    for sampl in sampling:
                        LOGGER.debug(" %s", sampl)
                        for prj in projections:
                            LOGGER.debug(" %s", prj)
                            control = pyreg.ContinuousControl()
                            if prj == "tu":
                                control.setup(tu_net, series, feature2node, sampl)
                            elif prj == "operon":
                                control.setup(op_net, series, feature2node, sampl)
                            else:
                                control.setup(net, series, feature2node, sampl)
                            if cntrl.type == "analog":
                                control.from_gpn()
                            elif cntrl.type == "digital":
                                control.from_trn()
                            else:
                                raise ValueError("'{}'".format(cntrl.type))
                            task_args[(anal.id, cntrl.id, exp.id, prep, sampl,
                                    prj)] = (control, series.columns)
                            bar += 1
    bar.finish()
    LOGGER.info("Running Jobs")
    # Extend each prepared tuple with the per-job parameters.
    tasks = [task_args[(job.analysis.id, job.control.id, job.experiment.id,
            job.preparation, job.sampling, job.projection)] + (job.measure,
            job.random_num, job.delay, job.id) for job in tasks]
    pool = multiprocessing.Pool(args.nproc)
    result_it = pool.imap_unordered(continuous_exec, tasks)
    bar = ProgressBar(maxval=len(tasks), widgets=[Timer(), " ",
            SimpleProgress(), " ", Percentage(), " ", Bar(), " ",
            ETA()]).start()
    for (job_id, z_scores, cntrl_scores, samples, points) in result_it:
        results = list()
        try:
            job = session.query(pymodels.Job).filter_by(id=job_id).one()
            for (i, name) in enumerate(points):
                res = pymodels.Result(control=cntrl_scores[i], ctc=z_scores[i],
                        point=name, job=job)
                session.add(res)
                results.append(res)
            job.complete = True
            session.commit()
        except Exception:
            # A failed job is rolled back and skipped; it stays incomplete.
            session.rollback()
            bar += 1
            continue
        if job.selection > 0:
            try:
                for (i, res) in enumerate(results):
                    # use a more low-level insert for speed
                    session.execute(pymodels.RandomSample.__table__.insert(),
                            [{"control": val, "result_id": res.id}
                            for val in np.random.choice(samples[i], job.selection,
                            replace=False)])
                # NOTE(review): source indentation was lost; committing once
                # after all sample inserts — confirm against repo history.
                session.commit()
            except Exception:
                session.rollback()
                bar += 1
                continue
        bar += 1
    bar.finish()
    session.close()
##############################################################################
# Main
##############################################################################
if __name__ == "__main__":
    # Command line: global options plus a 'discrete' or 'continuous'
    # sub-command that selects the main_* driver via set_defaults(func=...).
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument("-v", "--version", action="version", version="0.1")
    parser.add_argument("--log-level", dest="log_level", default="INFO",
            help="Log level, i.e., DEBUG, INFO, WARN, ERROR, CRITICAL (default: %(default)s)")
    parser.add_argument("--encoding", dest="encoding", default="utf-8",
            help="File encoding to assume (default: %(default)s)")
    parser.add_argument("-n", "--nproc", dest="nproc",
            default=multiprocessing.cpu_count(), type=int,
            help="Number of processors to use (default: %(default)s)")
    parser.add_argument("engine",
            help="Database connection string, e.g., 'sqlite+pysqlite:///file.db'")
    subparsers = parser.add_subparsers(help="sub-command help")
    # discrete
    parser_discrete = subparsers.add_parser("discrete",
            help="Perform a discrete control analysis")
    parser_discrete.set_defaults(func=main_discrete)
    # continuous
    parser_continuous = subparsers.add_parser("continuous",
            help="Perform a continuous control analysis")
    parser_continuous.set_defaults(func=main_continuous)
    args = parser.parse_args()
    dictConfig({"version": 1, "incremental": True,
            "root": {"level": args.log_level}})
    if args.log_level != "DEBUG":
        logging.getLogger("pyorganism").setLevel(logging.WARN)
    try:
        sys.exit(args.func(args))
    except: # we want to catch everything
        # Deliberate bare except: capture any exception (including
        # SystemExit/KeyboardInterrupt) and re-raise it with its original
        # traceback via future.utils.raise_ (Python 2/3 compatible).
        (err, msg, trace) = sys.exc_info()
        # do something
        raise_(err, msg, trace)
    finally:
        # Flush and close all logging handlers on every exit path.
        logging.shutdown()
| {
"content_hash": "cee58eaac8f817eced3999fcee31c014",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 120,
"avg_line_length": 43.98416886543536,
"alnum_prop": 0.5451709658068387,
"repo_name": "Midnighter/pyorganism",
"id": "3daf6b498854a19a70ac3ec189d0e1ea82114389",
"size": "16718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/control_multiprocessing.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3259"
},
{
"name": "Python",
"bytes": "533487"
},
{
"name": "Shell",
"bytes": "3679"
}
],
"symlink_target": ""
} |
from cgml.constants import SCHEMA_IDS as SID
from cgml.validators import validateSchema
def makeSchema(n_in=None,
               n_out=None,
               nLayers=1,
               inputDropRate=2,
               modelType=None,
               costFunction=None,
               activationFunction="tanh",
               useDropout=True):
    """Build and validate a layered network schema.

    Hidden layers shrink the width by *inputDropRate* per layer (never
    below *n_out*) and use *activationFunction*; the final layer is
    always linear. Dropout rates are 0.2 (input layer) and 0.5 (hidden
    layers) when *useDropout* is set, otherwise 0.
    """
    input_dropout = 0.2 if useDropout else 0.0
    hidden_dropout = 0.5 if useDropout else 0.0
    graph = []
    width = n_in
    for idx in range(nLayers - 1):
        # Shrink the layer width, but never below the output size.
        next_width = max(int(round(width / inputDropRate)), n_out)
        graph.append({SID.LAYER_NAME: "hidden{0}".format(idx),
                      SID.LAYER_N_IN: width,
                      SID.LAYER_N_OUT: next_width,
                      SID.LAYER_ACTIVATION: activationFunction,
                      SID.LAYER_DROPOUT: (input_dropout if idx == 0
                                          else hidden_dropout)})
        width = next_width
    # A single-layer model is a plain linear model and never uses dropout.
    graph.append({SID.LAYER_NAME: "output",
                  SID.LAYER_N_IN: width,
                  SID.LAYER_N_OUT: n_out,
                  SID.LAYER_ACTIVATION: "linear",
                  SID.LAYER_DROPOUT: (0.0 if nLayers == 1
                                      else hidden_dropout)})
    schema = {SID.DESCRIPTION: "schema by maker",
              SID.MODEL_TYPE: modelType,
              SID.SUPERVISED_COST: {SID.COST_NAME: "output",
                                    SID.COST_TYPE: costFunction},
              SID.GRAPH: graph}
    validateSchema(schema)
    return schema
| {
"content_hash": "0c0760cb03636a3887a4d81b033bdec1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 30.982142857142858,
"alnum_prop": 0.515850144092219,
"repo_name": "terkkila/cgml",
"id": "3f2606b5c2a9ef11024cb1c59f7678d0fc7cabe4",
"size": "1736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgml/makers/make_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "140"
},
{
"name": "Makefile",
"bytes": "158"
},
{
"name": "Python",
"bytes": "110591"
}
],
"symlink_target": ""
} |
"""ResNet50+ConvDet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
from utils import util
from easydict import EasyDict as edict
import numpy as np
import tensorflow as tf
from nn_skeleton import ModelSkeleton
class ResNet50ConvDet(ModelSkeleton):
  """ResNet50 backbone with a ConvDet detection head.

  Builds the full training graph (forward pass, interpretation, loss,
  train op, visualization) on a single GPU at construction time.
  """

  def __init__(self, mc, gpu_id=0):
    # Pin every part of the graph to the requested GPU.
    with tf.device('/gpu:{}'.format(gpu_id)):
      ModelSkeleton.__init__(self, mc)

      self._add_forward_graph()
      self._add_interpretation_graph()
      self._add_loss_graph()
      self._add_train_graph()
      self._add_viz_graph()

  def _add_forward_graph(self):
    """NN architecture: ResNet50 stages conv1 through conv4_x, then a
    dropout layer and a final 3x3 conv ('conv5') producing the ConvDet
    predictions in self.preds."""

    mc = self.mc
    if mc.LOAD_PRETRAINED_MODEL:
      assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
          'Cannot find pretrained model at the given path:' \
          ' {}'.format(mc.PRETRAINED_MODEL_PATH)
      # Pretrained weights are a joblib-pickled dict used by the layer
      # builders in ModelSkeleton.
      self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)

    # Stem: 7x7/2 conv with batch norm, then 3x3/2 max pool.
    conv1 = self._conv_bn_layer(
        self.image_input, 'conv1', 'bn_conv1', 'scale_conv1', filters=64,
        size=7, stride=2, freeze=True, conv_with_bias=True)
    pool1 = self._pooling_layer(
        'pool1', conv1, size=3, stride=2, padding='VALID')

    # conv2_x: three bottleneck residual blocks, 256 output channels.
    # The first block needs a 1x1 projection (branch1) to match channels.
    # All of conv2_x is frozen (freeze=True).
    with tf.variable_scope('conv2_x') as scope:
      with tf.variable_scope('res2a'):
        branch1 = self._conv_bn_layer(
            pool1, 'res2a_branch1', 'bn2a_branch1', 'scale2a_branch1',
            filters=256, size=1, stride=1, freeze=True, relu=False)
        branch2 = self._res_branch(
            pool1, layer_name='2a', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res2b'):
        branch2 = self._res_branch(
            res2a, layer_name='2b', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2b = tf.nn.relu(res2a+branch2, 'relu')
      with tf.variable_scope('res2c'):
        branch2 = self._res_branch(
            res2b, layer_name='2c', in_filters=64, out_filters=256,
            down_sample=False, freeze=True)
        res2c = tf.nn.relu(res2b+branch2, 'relu')

    # conv3_x: four bottleneck blocks, 512 output channels, spatially
    # downsampled by 2 in the first block. Also frozen.
    with tf.variable_scope('conv3_x') as scope:
      with tf.variable_scope('res3a'):
        branch1 = self._conv_bn_layer(
            res2c, 'res3a_branch1', 'bn3a_branch1', 'scale3a_branch1',
            filters=512, size=1, stride=2, freeze=True, relu=False)
        branch2 = self._res_branch(
            res2c, layer_name='3a', in_filters=128, out_filters=512,
            down_sample=True, freeze=True)
        res3a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res3b'):
        branch2 = self._res_branch(
            res3a, layer_name='3b', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3b = tf.nn.relu(res3a+branch2, 'relu')
      with tf.variable_scope('res3c'):
        branch2 = self._res_branch(
            res3b, layer_name='3c', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3c = tf.nn.relu(res3b+branch2, 'relu')
      with tf.variable_scope('res3d'):
        branch2 = self._res_branch(
            res3c, layer_name='3d', in_filters=128, out_filters=512,
            down_sample=False, freeze=True)
        res3d = tf.nn.relu(res3c+branch2, 'relu')

    # conv4_x: six bottleneck blocks, 1024 output channels, downsampled
    # by 2 in the first block. These layers are trainable (no freeze).
    with tf.variable_scope('conv4_x') as scope:
      with tf.variable_scope('res4a'):
        branch1 = self._conv_bn_layer(
            res3d, 'res4a_branch1', 'bn4a_branch1', 'scale4a_branch1',
            filters=1024, size=1, stride=2, relu=False)
        branch2 = self._res_branch(
            res3d, layer_name='4a', in_filters=256, out_filters=1024,
            down_sample=True)
        res4a = tf.nn.relu(branch1+branch2, 'relu')
      with tf.variable_scope('res4b'):
        branch2 = self._res_branch(
            res4a, layer_name='4b', in_filters=256, out_filters=1024,
            down_sample=False)
        res4b = tf.nn.relu(res4a+branch2, 'relu')
      with tf.variable_scope('res4c'):
        branch2 = self._res_branch(
            res4b, layer_name='4c', in_filters=256, out_filters=1024,
            down_sample=False)
        res4c = tf.nn.relu(res4b+branch2, 'relu')
      with tf.variable_scope('res4d'):
        branch2 = self._res_branch(
            res4c, layer_name='4d', in_filters=256, out_filters=1024,
            down_sample=False)
        res4d = tf.nn.relu(res4c+branch2, 'relu')
      with tf.variable_scope('res4e'):
        branch2 = self._res_branch(
            res4d, layer_name='4e', in_filters=256, out_filters=1024,
            down_sample=False)
        res4e = tf.nn.relu(res4d+branch2, 'relu')
      with tf.variable_scope('res4f'):
        branch2 = self._res_branch(
            res4e, layer_name='4f', in_filters=256, out_filters=1024,
            down_sample=False)
        res4f = tf.nn.relu(res4e+branch2, 'relu')

    dropout4 = tf.nn.dropout(res4f, self.keep_prob, name='drop4')

    # ConvDet head: per anchor, class scores + 1 confidence + 4 box deltas.
    num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
    self.preds = self._conv_layer(
        'conv5', dropout4, filters=num_output, size=3, stride=1,
        padding='SAME', xavier=False, relu=False, stddev=0.0001)

  def _res_branch(
      self, inputs, layer_name, in_filters, out_filters, down_sample=False,
      freeze=False):
    """Residual branch constructor (the 1x1 -> 3x3 -> 1x1 bottleneck path).

    Args:
      inputs: input tensor
      layer_name: layer name
      in_filters: number of filters in XX_branch2a and XX_branch2b layers.
      out_filters: number of filters in XX_branch2c layers.
      down_sample: if true, down sample the input feature map (stride 2 on
        the first 1x1 conv)
      freeze: if true, do not change parameters in this layer
    Returns:
      A residual branch output operation (no final ReLU; the caller adds
      the shortcut and applies ReLU).
    """
    with tf.variable_scope('res'+layer_name+'_branch2'):
      stride = 2 if down_sample else 1
      output = self._conv_bn_layer(
          inputs,
          conv_param_name='res'+layer_name+'_branch2a',
          bn_param_name='bn'+layer_name+'_branch2a',
          scale_param_name='scale'+layer_name+'_branch2a',
          filters=in_filters, size=1, stride=stride, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2b',
          bn_param_name='bn'+layer_name+'_branch2b',
          scale_param_name='scale'+layer_name+'_branch2b',
          filters=in_filters, size=3, stride=1, freeze=freeze)
      output = self._conv_bn_layer(
          output,
          conv_param_name='res'+layer_name+'_branch2c',
          bn_param_name='bn'+layer_name+'_branch2c',
          scale_param_name='scale'+layer_name+'_branch2c',
          filters=out_filters, size=1, stride=1, freeze=freeze, relu=False)
      return output
| {
"content_hash": "85edf261f11b407b5b97f0809898e4ac",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 76,
"avg_line_length": 40.616766467065865,
"alnum_prop": 0.6072534276868642,
"repo_name": "BichenWuUCB/squeezeDet",
"id": "b64632d4f6dbee38682ee65cc4db0470fc35ecd2",
"size": "6837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nets/resnet50_convDet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "31297"
},
{
"name": "Makefile",
"bytes": "123"
},
{
"name": "Python",
"bytes": "129931"
},
{
"name": "Shell",
"bytes": "3518"
}
],
"symlink_target": ""
} |
"""
Utility class for network related operations.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from oslo_log import log as logging
from os_windows._i18n import _, _LE
from os_windows import exceptions
from os_windows.utils import constants
from os_windows.utils import jobutils
LOG = logging.getLogger(__name__)
class NetworkUtils(object):
    """Hyper-V networking helper built on the WMI root/virtualization (V1)
    namespace.

    Wraps the Msvm_* WMI classes to look up virtual switches, create and
    disconnect switch ports, and manage VLAN settings.
    """

    # WMI class / association names used in the queries below.
    _ETHERNET_SWITCH_PORT = 'Msvm_SwitchPort'
    _SWITCH_LAN_ENDPOINT = 'Msvm_SwitchLanEndpoint'
    _VIRTUAL_SWITCH = 'Msvm_VirtualSwitch'
    _BINDS_TO = 'Msvm_BindsTo'
    _VLAN_ENDPOINT_SET_DATA = 'Msvm_VLANEndpointSettingData'

    def __init__(self):
        self._jobutils = jobutils.JobUtils()
        # The wmi module is only importable on Windows (see module-level
        # guarded import), so only connect there.
        if sys.platform == 'win32':
            self._conn = wmi.WMI(moniker='//./root/virtualization')

    def get_external_vswitch(self, vswitch_name):
        """Return the WMI path of the named vswitch, or — when no name is
        given — of the vswitch connected to the first bound physical NIC.

        Raises HyperVException when no matching vswitch exists.
        """
        if vswitch_name:
            vswitches = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name)
        else:
            # Find the vswitch that is connected to the first physical nic.
            ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
            port = ext_port.associators(wmi_result_class='Msvm_SwitchPort')[0]
            vswitches = port.associators(wmi_result_class='Msvm_VirtualSwitch')

        if not len(vswitches):
            raise exceptions.HyperVException(_('vswitch "%s" not found')
                                             % vswitch_name)
        return vswitches[0].path_()

    def get_vswitch_id(self, vswitch_name):
        """Return the Name (GUID-like identifier) of the named vswitch."""
        vswitch = self._get_vswitch(vswitch_name)
        return vswitch.Name

    def _get_vswitch(self, vswitch_name):
        # Look up the Msvm_VirtualSwitch object by its friendly ElementName;
        # raise when it does not exist.
        vswitch = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name)
        if not vswitch:
            raise exceptions.HyperVException(_('VSwitch not found: %s') %
                                             vswitch_name)
        return vswitch[0]

    def _get_vswitch_external_port(self, vswitch_name):
        # Walk external port -> LAN endpoint -> switch port -> vswitch and
        # return the switch port belonging to the named vswitch.
        # Returns None implicitly when no external port matches.
        ext_ports = self._conn.Msvm_ExternalEthernetPort()
        for ext_port in ext_ports:
            lan_endpoint_list = ext_port.associators(
                wmi_result_class='Msvm_SwitchLanEndpoint')
            if lan_endpoint_list:
                vswitch_port_list = lan_endpoint_list[0].associators(
                    wmi_result_class=self._ETHERNET_SWITCH_PORT)
                if vswitch_port_list:
                    vswitch_port = vswitch_port_list[0]
                    vswitch_list = vswitch_port.associators(
                        wmi_result_class='Msvm_VirtualSwitch')
                    if (vswitch_list and
                            vswitch_list[0].ElementName == vswitch_name):
                        return vswitch_port

    def set_switch_external_port_trunk_vlan(self, vswitch_name, vlan_id,
                                            desired_endpoint_mode):
        """Add vlan_id to the trunked VLAN list of the vswitch's external
        port and switch the endpoint into the desired (trunk) mode.

        Logs an error and returns without changing the mode when the
        switch / physical adapter does not support the desired mode.
        """
        vswitch_external_port = self._get_vswitch_external_port(vswitch_name)
        if vswitch_external_port:
            vlan_endpoint = vswitch_external_port.associators(
                wmi_association_class=self._BINDS_TO)[0]
            vlan_endpoint_settings = vlan_endpoint.associators(
                wmi_result_class=self._VLAN_ENDPOINT_SET_DATA)[0]

            # Only write back when the VLAN is not already trunked.
            if vlan_id not in vlan_endpoint_settings.TrunkedVLANList:
                vlan_endpoint_settings.TrunkedVLANList += (vlan_id,)
                vlan_endpoint_settings.put()

            if (desired_endpoint_mode not in
                    vlan_endpoint.SupportedEndpointModes):
                LOG.error(_LE("'Trunk' VLAN endpoint mode is not supported by "
                              "the switch / physycal network adapter. Correct "
                              "this issue or use flat networks instead."))
                return

            if vlan_endpoint.DesiredEndpointMode != desired_endpoint_mode:
                vlan_endpoint.DesiredEndpointMode = desired_endpoint_mode
                vlan_endpoint.put()

    def create_vswitch_port(self, vswitch_path, port_name):
        """Create a new port (random GUID name, friendly name port_name) on
        the vswitch at vswitch_path and return its WMI path.
        """
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        # Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=port_name,
            ScopeOfResidence="",
            VirtualSwitch=vswitch_path)
        if ret_val != 0:
            raise exceptions.HyperVException(
                _("Failed to create vswitch port %(port_name)s on switch "
                  "%(vswitch_path)s") % {'port_name': port_name,
                                         'vswitch_path': vswitch_path})
        return new_port

    def vswitch_port_needed(self):
        # NOTE(alexpilotti): In WMI V2 the vswitch_path is set in the VM
        # setting data without the need for a vswitch port.
        return True

    def get_switch_ports(self, vswitch_name):
        """Return the set of port Names belonging to the named vswitch."""
        vswitch = self._get_vswitch(vswitch_name)
        vswitch_ports = vswitch.associators(
            wmi_result_class=self._ETHERNET_SWITCH_PORT)
        return set(p.Name for p in vswitch_ports)

    def vnic_port_exists(self, port_id):
        """Return True when a vNIC setting with this port_id exists."""
        try:
            self._get_vnic_settings(port_id)
        except Exception:
            # Best-effort existence probe: any lookup failure means "no".
            return False
        return True

    def get_vnic_ids(self):
        """Return the set of all non-None vNIC ElementName identifiers."""
        return set(
            p.ElementName
            for p in self._conn.Msvm_SyntheticEthernetPortSettingData()
            if p.ElementName is not None)

    def _get_vnic_settings(self, vnic_name):
        # Fetch the Msvm_SyntheticEthernetPortSettingData for the vNIC or
        # raise when it does not exist.
        vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData(
            ElementName=vnic_name)
        if not vnic_settings:
            raise exceptions.HyperVException(
                message=_('Vnic not found: %s') % vnic_name)
        return vnic_settings[0]

    def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
        """Connect the vNIC to the vswitch, reusing an existing port with
        the same name or creating one, unless already connected.
        """
        vnic_settings = self._get_vnic_settings(switch_port_name)
        if not vnic_settings.Connection or not vnic_settings.Connection[0]:
            port = self.get_port_by_id(switch_port_name, vswitch_name)
            if port:
                port_path = port.Path_()
            else:
                port_path = self.create_vswitch_port(
                    vswitch_name, switch_port_name)
            vnic_settings.Connection = [port_path]
            self._jobutils.modify_virt_resource(vnic_settings)

    def _get_vm_from_res_setting_data(self, res_setting_data):
        # Resource setting data -> VM setting data -> the owning VM object.
        sd = res_setting_data.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        vm = sd[0].associators(
            wmi_result_class='Msvm_ComputerSystem')
        return vm[0]

    def disconnect_switch_port(self, switch_port_name, vnic_deleted,
                               delete_port):
        """Disconnects the switch port.

        When vnic_deleted is False the port is first disconnected; when
        delete_port is True the port is then removed from the switch.
        Returns silently when the port no longer exists.
        """
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        switch_port_path = self._get_switch_port_path_by_name(
            switch_port_name)
        if not switch_port_path:
            # Port not found. It happens when the VM was already deleted.
            return

        if not vnic_deleted:
            (ret_val, ) = switch_svc.DisconnectSwitchPort(
                SwitchPort=switch_port_path)
            if ret_val != 0:
                data = {'switch_port_name': switch_port_name,
                        'ret_val': ret_val}
                raise exceptions.HyperVException(
                    message=_('Failed to disconnect port %(switch_port_name)s '
                              'with error %(ret_val)s') % data)
        if delete_port:
            (ret_val, ) = switch_svc.DeleteSwitchPort(
                SwitchPort=switch_port_path)
            if ret_val != 0:
                data = {'switch_port_name': switch_port_name,
                        'ret_val': ret_val}
                raise exceptions.HyperVException(
                    message=_('Failed to delete port %(switch_port_name)s '
                              'with error %(ret_val)s') % data)

    def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
        """Set the access VLAN of the port, writing back only on change."""
        vlan_endpoint_settings = self._conn.Msvm_VLANEndpointSettingData(
            ElementName=switch_port_name)[0]
        if vlan_endpoint_settings.AccessVLAN != vlan_id:
            vlan_endpoint_settings.AccessVLAN = vlan_id
            vlan_endpoint_settings.put()

    def _get_switch_port_path_by_name(self, switch_port_name):
        # Returns the port's WMI path, or None implicitly when not found.
        vswitch = self._conn.Msvm_SwitchPort(ElementName=switch_port_name)
        if vswitch:
            return vswitch[0].path_()

    def get_port_by_id(self, port_id, vswitch_name):
        """Return the switch port with ElementName == port_id on the named
        vswitch, or None implicitly when absent.
        """
        vswitch = self._get_vswitch(vswitch_name)
        switch_ports = vswitch.associators(
            wmi_result_class=self._ETHERNET_SWITCH_PORT)
        for switch_port in switch_ports:
            if (switch_port.ElementName == port_id):
                return switch_port

    def remove_all_security_rules(self, switch_port_name):
        # Security rules are not modeled in the V1 namespace; no-op here.
        pass

    def enable_port_metrics_collection(self, switch_port_name):
        raise NotImplementedError(_("Metrics collection is not supported on "
                                    "this version of Hyper-V"))

    def enable_control_metrics(self, switch_port_name):
        raise NotImplementedError(_("Metrics collection is not supported on "
                                    "this version of Hyper-V"))
| {
"content_hash": "69afcb847b94bed9caa544175934b6db",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 42.1447963800905,
"alnum_prop": 0.5867511273351943,
"repo_name": "cloudbase/oslo.windows",
"id": "a9851f3432daea4f97e149fdf8ce962345d2005d",
"size": "9953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "os_windows/utils/networkutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "312171"
}
],
"symlink_target": ""
} |
from django.conf import settings
from PIL import Image
# django-avatar settings: each value can be overridden in the Django
# settings module; the second getattr argument is the default.

# Rendering defaults: requested size (px), which thumbnail sizes to
# pre-generate, and the PIL resampling filter used when resizing.
AVATAR_DEFAULT_SIZE = getattr(settings, 'AVATAR_DEFAULT_SIZE', 80)
AUTO_GENERATE_AVATAR_SIZES = getattr(settings, 'AUTO_GENERATE_AVATAR_SIZES', (AVATAR_DEFAULT_SIZE,))
AVATAR_RESIZE_METHOD = getattr(settings, 'AVATAR_RESIZE_METHOD', Image.ANTIALIAS)

# Storage location (relative dir) for uploaded avatar files.
AVATAR_STORAGE_DIR = getattr(settings, 'AVATAR_STORAGE_DIR', 'avatars')

# Gravatar integration: base URL, whether to fall back to Gravatar, and
# the Gravatar "default image" parameter (None means Gravatar's own).
AVATAR_GRAVATAR_BASE_URL = getattr(settings, 'AVATAR_GRAVATAR_BASE_URL', 'http://www.gravatar.com/avatar/')
AVATAR_GRAVATAR_BACKUP = getattr(settings, 'AVATAR_GRAVATAR_BACKUP', True)
AVATAR_GRAVATAR_DEFAULT = getattr(settings, 'AVATAR_GRAVATAR_DEFAULT', None)

# Fallback image used when a user has no avatar.
AVATAR_DEFAULT_URL = getattr(settings, 'AVATAR_DEFAULT_URL', 'avatar/img/default.jpg')

# Upload limits: max avatars per user and max upload size in bytes (1 MiB).
AVATAR_MAX_AVATARS_PER_USER = getattr(settings, 'AVATAR_MAX_AVATARS_PER_USER', 42)
AVATAR_MAX_SIZE = getattr(settings, 'AVATAR_MAX_SIZE', 1024 * 1024)

# Thumbnail output format/quality passed to PIL when saving.
AVATAR_THUMB_FORMAT = getattr(settings, 'AVATAR_THUMB_FORMAT', "JPEG")
AVATAR_THUMB_QUALITY = getattr(settings, 'AVATAR_THUMB_QUALITY', 85)

# Whether file names / per-user directories are hashed instead of literal.
AVATAR_HASH_FILENAMES = getattr(settings, 'AVATAR_HASH_FILENAMES', False)
AVATAR_HASH_USERDIRNAMES = getattr(settings, 'AVATAR_HASH_USERDIRNAMES', False)

# Optional whitelist of allowed upload extensions (None = allow any).
AVATAR_ALLOWED_FILE_EXTS = getattr(settings, 'AVATAR_ALLOWED_FILE_EXTS', None)

# Cache timeout (seconds) for avatar lookups.
AVATAR_CACHE_TIMEOUT = getattr(settings, 'AVATAR_CACHE_TIMEOUT', 60 * 60)

# File storage backend and whether files are removed on model delete.
AVATAR_STORAGE = getattr(settings, 'AVATAR_STORAGE', settings.DEFAULT_FILE_STORAGE)
AVATAR_CLEANUP_DELETED = getattr(settings, 'AVATAR_CLEANUP_DELETED', False)
| {
"content_hash": "61a078dd025cad5d5aeafc3f4429acf2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 107,
"avg_line_length": 70.61904761904762,
"alnum_prop": 0.7714093054619016,
"repo_name": "kkanahin/django-avatar",
"id": "71a3609d4ada95fc3fe022e8d34670353844565d",
"size": "1483",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "avatar/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Test MiniMap
---------------
"""
import folium
from folium import plugins
from folium.utilities import normalize
def test_minimap():
    """Render maps carrying the MiniMap plugin and inspect the output."""
    # Default MiniMap attached through Map.add_child.
    base_map = folium.Map(location=(30, 20), zoom_start=4)
    mini = plugins.MiniMap()
    base_map.add_child(mini)
    rendered = normalize(base_map._parent.render())
    # A new minimap control must be instantiated in the generated JS.
    assert "new L.Control.MiniMap" in rendered

    # MiniMap with an explicit tile layer, attached through add_to.
    base_map = folium.Map(location=(30, 20), zoom_start=4)
    mini = plugins.MiniMap(tile_layer="Stamen Toner")
    mini.add_to(base_map)
    rendered = normalize(base_map._parent.render())
    # The Stamen Toner tile host must show up in the output.
    assert "https://stamen-tiles" in rendered
| {
"content_hash": "02c06f7ce57febb88969dea59874b9a1",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 56,
"avg_line_length": 23.285714285714285,
"alnum_prop": 0.6595092024539877,
"repo_name": "python-visualization/folium",
"id": "cdc3fa064997419808803eedc064e1f09c19508a",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plugins/test_minimap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39887"
},
{
"name": "JavaScript",
"bytes": "268"
},
{
"name": "Python",
"bytes": "375811"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _, ugettext
from satchmo.newsletter.forms import NewsletterForm
def add_subscription(request, template="newsletter/subscribe_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Add a subscription and return the results in the requested template."""
    # state=True forces the subscription to be enabled in _update.
    return _update(request, True, template, result_template, form=form)
def remove_subscription(request, template="newsletter/unsubscribe_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Remove a subscription and return the results in the requested template."""
    # state=False forces the subscription to be disabled in _update.
    return _update(request, False, template, result_template, form=form)
def update_subscription(request, template="newsletter/update_form.html",
    result_template="newsletter/update_results.html", form=NewsletterForm):
    """Update a subscription (status taken from the submitted form) and
    return the results in the requested template."""
    # state='FORM' tells _update to read the subscription status from the form.
    return _update(request, 'FORM', template, result_template, form=form)
def _update(request, state, template, result_template, form=NewsletterForm):
    """Process a subscription change and render the matching template.

    ``state`` is True/False to force the subscription status, or the
    string 'FORM' to take the status from the submitted form data. On a
    successful save the ``result_template`` is rendered; otherwise the
    input ``template`` is rendered again.
    """
    success = False
    result = ""

    if request.method != "POST":
        # No submission yet: show an unbound form.
        workform = form()
    else:
        workform = form(request.POST)
        if not workform.is_valid():
            result = ugettext('Error, not valid.')
        else:
            # 'FORM' means the subscription status comes from the form
            # itself; anything else is passed through as the forced state.
            result = workform.save() if state == 'FORM' else workform.save(state)
            success = True

    ctx = RequestContext(request, {
        'result' : result,
        'form' : workform
    })

    template_name = result_template if success else template
    return render_to_response(template_name, ctx)
| {
"content_hash": "2867f3687927ed65f2189d59a9882ad7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 81,
"avg_line_length": 37.7962962962963,
"alnum_prop": 0.6805487506124449,
"repo_name": "sankroh/satchmo",
"id": "5e993b0abf7f21769d1041be6ed40932906e3d1a",
"size": "2041",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "satchmo/newsletter/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
from flake8.api.legacy import get_style_guide
class TestFlake8Compliance(unittest.TestCase):
    """Fail the build when flake8 reports any 'E'-class violations."""

    def test_flake8(self):
        # Run flake8 with its default configuration over the project files.
        style_guide = get_style_guide()
        report = style_guide.check_files()
        # An empty 'E' statistics list means no pycodestyle errors found.
        self.assertEqual(report.get_statistics('E'), [])
| {
"content_hash": "b0fccf75b9830cc275dc3baec21c28ba",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.697841726618705,
"repo_name": "EmilStenstrom/json-traverse",
"id": "3ca174d9f565e308b2f7e1023a25de4d1c90aa30",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12053"
}
],
"symlink_target": ""
} |
import ipv6
import network_layer
import config
import mle
from enum import IntEnum
class CheckType(IntEnum):
    """Expectation applied to a TLV when verifying a command message."""
    CONTAIN = 0      # the TLV must be present
    NOT_CONTAIN = 1  # the TLV must be absent
    OPTIONAL = 2     # the TLV may or may not be present
def check_address_query(command_msg, source_node, destination_address):
    """Verify source_node sent a properly formatted Address Query Request message to the destination_address.
    """
    command_msg.assertCoapMessageContainsTlv(network_layer.TargetEid)

    header = command_msg.ipv6_packet.ipv6_header
    expected_source = ipv6.ip_address(
        source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC))

    # The query must be sent from the originator's RLOC.
    assert expected_source == header.source_address, \
        "Error: The IPv6 source address is not the RLOC of the originator. The source node's rloc is: " \
        + str(expected_source) + ", but the source_address in command msg is: " \
        + str(header.source_address)

    # destination_address is a bytes string; decode before parsing.
    assert ipv6.ip_address(destination_address.decode('utf-8')) == header.destination_address, "Error: The IPv6 destination address is not expected."
def check_address_notification(command_msg, source_node, destination_node):
    """Verify source_node sent a properly formatted Address Notification command message to destination_node.
    """
    command_msg.assertCoapMessageRequestUriPath('/a/an')
    # Target EID, RLOC16 and ML-EID TLVs are all mandatory.
    for required_tlv in (network_layer.TargetEid,
                         network_layer.Rloc16,
                         network_layer.MlEid):
        command_msg.assertCoapMessageContainsTlv(required_tlv)

    header = command_msg.ipv6_packet.ipv6_header

    src_rloc = source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
    assert ipv6.ip_address(src_rloc) == header.source_address, "Error: The IPv6 source address is not the RLOC of the originator."

    dst_rloc = destination_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
    assert ipv6.ip_address(dst_rloc) == header.destination_address, "Error: The IPv6 destination address is not the RLOC of the destination."
def check_address_error_notification(command_msg, source_node, destination_address):
    """Verify source_node sent a properly formatted Address Error Notification command message to destination_address.
    """
    command_msg.assertCoapMessageRequestUriPath('/a/ae')
    # Target EID and ML-EID TLVs are mandatory in an Address Error.
    command_msg.assertCoapMessageContainsTlv(network_layer.TargetEid)
    command_msg.assertCoapMessageContainsTlv(network_layer.MlEid)

    # The message must originate from the source node's RLOC.
    source_rloc = source_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
    assert ipv6.ip_address(source_rloc) == command_msg.ipv6_packet.ipv6_header.source_address, \
        "Error: The IPv6 source address is not the RLOC of the originator. The source node's rloc is: " \
        + str(ipv6.ip_address(source_rloc)) + ", but the source_address in command msg is: " \
        + str(command_msg.ipv6_packet.ipv6_header.source_address)

    # destination_address is a bytes string (hence the decode) and must
    # match the packet's destination address.
    assert ipv6.ip_address(destination_address.decode('utf-8')) == command_msg.ipv6_packet.ipv6_header.destination_address, \
        "Error: The IPv6 destination address is not expected. The destination node's rloc is: " \
        + str(ipv6.ip_address(destination_address.decode('utf-8'))) + ", but the destination_address in command msg is: " \
        + str(command_msg.ipv6_packet.ipv6_header.destination_address)
def check_address_release(command_msg, destination_node):
    """Verify the message is a properly formatted address release destined to the given node.
    """
    command_msg.assertCoapMessageRequestUriPath('/a/ar')
    # RLOC16 and extended MAC address TLVs are both mandatory.
    for required_tlv in (network_layer.Rloc16,
                         network_layer.MacExtendedAddress):
        command_msg.assertCoapMessageContainsTlv(required_tlv)

    dest_rloc = destination_node.get_ip6_address(config.ADDRESS_TYPE.RLOC)
    assert ipv6.ip_address(dest_rloc) == command_msg.ipv6_packet.ipv6_header.destination_address, "Error: The destination is not RLOC address"
def check_tlv_request_tlv(command_msg, check_type, tlv_id):
    """Verify whether the message's TLV Request TLV contains the given TLV ID.

    Args:
        command_msg: MLE command message to inspect.
        check_type: a CheckType selecting the expectation (CONTAIN,
            NOT_CONTAIN or OPTIONAL).
        tlv_id: TLV ID expected (or not) inside the TLV Request TLV.

    Raises:
        AssertionError: when the expectation is violated.
        ValueError: when check_type is not a known CheckType.
    """
    tlv_request_tlv = command_msg.get_mle_message_tlv(mle.TlvRequest)
    # Compute membership once: the ID is present only when the TLV Request
    # TLV exists and lists it.
    has_id = (tlv_request_tlv is not None and
              any(tlv_id == requested for requested in tlv_request_tlv.tlvs))

    if check_type == CheckType.CONTAIN:
        assert tlv_request_tlv is not None, "Error: The msg doesn't contain TLV Request TLV"
        assert has_id, "Error: The msg doesn't contain TLV Request TLV ID: {}".format(tlv_id)

    elif check_type == CheckType.NOT_CONTAIN:
        # `not has_id` instead of `... is False`: any() already yields a bool.
        assert not has_id, "Error: The msg contains TLV Request TLV ID: {}".format(tlv_id)

    elif check_type == CheckType.OPTIONAL:
        # Purely informational: report what was (not) found.
        if tlv_request_tlv is None:
            print("The msg doesn't contain TLV Request TLV")
        elif has_id:
            print("TLV Request TLV contains TLV ID: {}".format(tlv_id))
        else:
            print("TLV Request TLV doesn't contain TLV ID: {}".format(tlv_id))

    else:
        raise ValueError("Invalid check type")
def check_link_request(command_msg, source_address = CheckType.OPTIONAL, leader_data = CheckType.OPTIONAL, \
    tlv_request_address16 = CheckType.OPTIONAL, tlv_request_route64 = CheckType.OPTIONAL, \
    tlv_request_link_margin = CheckType.OPTIONAL):
    """Verify a properly formatted Link Request command message.
    """
    # Challenge and Version TLVs are mandatory in every Link Request.
    for mandatory_tlv in (mle.Challenge, mle.Version):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)

    check_mle_optional_tlv(command_msg, source_address, mle.SourceAddress)
    check_mle_optional_tlv(command_msg, leader_data, mle.LeaderData)

    # Requested TLV IDs inside the TLV Request TLV, each with its own
    # caller-supplied expectation.
    for expectation, requested_id in ((tlv_request_address16, mle.TlvType.ADDRESS16),
                                      (tlv_request_route64, mle.TlvType.ROUTE64),
                                      (tlv_request_link_margin, mle.TlvType.LINK_MARGIN)):
        check_tlv_request_tlv(command_msg, expectation, requested_id)
def check_link_accept(command_msg, destination_node, \
    leader_data = CheckType.OPTIONAL, link_margin = CheckType.OPTIONAL, mle_frame_counter = CheckType.OPTIONAL, \
    challenge = CheckType.OPTIONAL, address16 = CheckType.OPTIONAL, route64 = CheckType.OPTIONAL, \
    tlv_request_link_margin = CheckType.OPTIONAL):
    """verify a properly formatted link accept command message.
    """
    # Mandatory TLVs for a Link Accept.
    for mandatory_tlv in (mle.LinkLayerFrameCounter, mle.SourceAddress,
                          mle.Response, mle.Version):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)

    # Caller-configurable expectations for the remaining TLVs.
    for expectation, optional_tlv in ((leader_data, mle.LeaderData),
                                      (link_margin, mle.LinkMargin),
                                      (mle_frame_counter, mle.MleFrameCounter),
                                      (challenge, mle.Challenge),
                                      (address16, mle.Address16),
                                      (route64, mle.Route64)):
        check_mle_optional_tlv(command_msg, expectation, optional_tlv)

    check_tlv_request_tlv(command_msg, tlv_request_link_margin, mle.TlvType.LINK_MARGIN)

    # The accept must be addressed to the destination's link-local address.
    destination_link_local = destination_node.get_ip6_address(config.ADDRESS_TYPE.LINK_LOCAL)
    assert ipv6.ip_address(destination_link_local) == command_msg.ipv6_packet.ipv6_header.destination_address, \
        "Error: The destination is unexpected"
def check_icmp_path(sniffer, path, nodes, icmp_type = ipv6.ICMP_ECHO_REQUEST):
    """Verify icmp message is forwarded along the path.

    For every node except the last, asserts that the ICMP message it sent
    is MAC-addressed to the next node's RLOC16. Returns True once the last
    node of the path is reached; returns False only when ``path`` is empty
    (the loop body never runs).
    """
    len_path = len(path)

    # Verify icmp message is forwarded to the next node of the path.
    for i in range(0, len_path):
        node_msg = sniffer.get_messages_sent_by(path[i])
        node_icmp_msg = node_msg.get_icmp_message(icmp_type)

        if i < len_path - 1:
            # Intermediate hop: the MAC destination must be the next node.
            next_node = nodes[path[i + 1]]
            next_node_rloc16 = next_node.get_addr16()
            assert next_node_rloc16 == node_icmp_msg.mac_header.dest_address.rloc, "Error: The path is unexpected."
        else:
            # Reached the final node of the path without a mismatch.
            return True

    return False
def check_id_set(command_msg, router_id):
    """Check the command_msg's Route64 tlv to verify router_id is an active router.
    """
    route64_tlv = command_msg.assertMleMessageContainsTlv(mle.Route64)
    # Bit (63 - router_id) of the 64-bit router ID mask marks the router
    # as allocated; return that bit as 0/1.
    mask = route64_tlv.router_id_mask
    return (mask >> (63 - router_id)) & 1
def get_routing_cost(command_msg, router_id):
    """Check the command_msg's Route64 tlv to get the routing cost to router.

    The Route64 TLV carries one routing entry per set bit of the 64-bit
    router ID mask; the entry index for ``router_id`` is therefore the
    number of set bits that precede its bit in the mask.
    """
    tlv = command_msg.assertMleMessageContainsTlv(mle.Route64)

    # Get router's mask pos
    # Turn the number into binary string. Need to consider the preceding 0 omitted during conversion.
    router_id_mask_str = bin(tlv.router_id_mask).replace('0b','')
    prefix_len = 64 - len(router_id_mask_str)
    routing_entry_pos = 0

    # Count the set bits before the router's own bit: that is the index
    # of its routing entry in link_quality_and_route_data.
    for i in range(0, router_id - prefix_len):
        if router_id_mask_str[i] == '1':
            routing_entry_pos += 1

    # The router's own bit must be set, otherwise it isn't in the topology.
    assert router_id_mask_str[router_id - prefix_len] == '1', "Error: The router isn't in the topology. \n" \
        + "route64 tlv is: %s. \nrouter_id is: %s. \nrouting_entry_pos is: %s. \nrouter_id_mask_str is: %s." \
        %(tlv, router_id, routing_entry_pos, router_id_mask_str)

    return tlv.link_quality_and_route_data[routing_entry_pos].route
def check_mle_optional_tlv(command_msg, type, tlv):
    """Apply the CheckType expectation `type` to the MLE TLV `tlv`."""
    # Map each expectation onto the matching assertion method.
    dispatch = {
        CheckType.CONTAIN: command_msg.assertMleMessageContainsTlv,
        CheckType.NOT_CONTAIN: command_msg.assertMleMessageDoesNotContainTlv,
        CheckType.OPTIONAL: command_msg.assertMleMessageContainsOptionalTlv,
    }
    if type not in dispatch:
        raise ValueError("Invalid check type")
    dispatch[type](tlv)
def check_mle_advertisement(command_msg):
    """Verify a properly formatted MLE Advertisement command message."""
    # Advertisements go link-local multicast with hop limit 255.
    command_msg.assertSentWithHopLimit(255)
    command_msg.assertSentToDestinationAddress(config.LINK_LOCAL_ALL_NODES_ADDRESS)
    # Source Address, Leader Data and Route64 TLVs are all mandatory.
    for mandatory_tlv in (mle.SourceAddress, mle.LeaderData, mle.Route64):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)
def check_parent_request(command_msg):
    """Verify a properly formatted Parent Request command message.
    """
    # Parent Requests are multicast to all routers with hop limit 255.
    command_msg.assertSentWithHopLimit(255)
    command_msg.assertSentToDestinationAddress(config.LINK_LOCAL_ALL_ROUTERS_ADDRESS)
    # Mode, Challenge, Scan Mask and Version TLVs are all mandatory.
    for mandatory_tlv in (mle.Mode, mle.Challenge, mle.ScanMask, mle.Version):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)
def check_parent_response(command_msg, mle_frame_counter = CheckType.OPTIONAL):
    """Verify a properly formatted Parent Response command message.
    """
    # All of these TLVs are mandatory in a Parent Response.
    for mandatory_tlv in (mle.Challenge,
                          mle.Connectivity,
                          mle.LeaderData,
                          mle.LinkLayerFrameCounter,
                          mle.LinkMargin,
                          mle.Response,
                          mle.SourceAddress,
                          mle.Version):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)

    check_mle_optional_tlv(command_msg, mle_frame_counter, mle.MleFrameCounter)
def check_child_id_request(command_msg, tlv_request = CheckType.OPTIONAL, \
    mle_frame_counter = CheckType.OPTIONAL, address_registration = CheckType.OPTIONAL, \
    active_timestamp = CheckType.OPTIONAL, pending_timestamp = CheckType.OPTIONAL):
    """Verify a properly formatted Child Id Request command message.
    """
    # Mandatory TLVs in a Child ID Request.
    for mandatory_tlv in (mle.LinkLayerFrameCounter, mle.Mode,
                          mle.Response, mle.Timeout, mle.Version):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)

    # Remaining TLVs are checked against caller-supplied expectations.
    for expectation, optional_tlv in ((tlv_request, mle.TlvRequest),
                                      (mle_frame_counter, mle.MleFrameCounter),
                                      (address_registration, mle.AddressRegistration),
                                      (active_timestamp, mle.ActiveTimestamp),
                                      (pending_timestamp, mle.PendingTimestamp)):
        check_mle_optional_tlv(command_msg, expectation, optional_tlv)
def check_child_id_response(command_msg, route64 = CheckType.OPTIONAL, network_data = CheckType.OPTIONAL, \
    address_registration = CheckType.OPTIONAL, active_timestamp = CheckType.OPTIONAL, \
    pending_timestamp = CheckType.OPTIONAL, active_operational_dataset = CheckType.OPTIONAL, \
    pending_operational_dataset = CheckType.OPTIONAL):
    """Verify a properly formatted Child Id Response command message.
    """
    # Mandatory TLVs in a Child ID Response.
    for mandatory_tlv in (mle.SourceAddress, mle.LeaderData, mle.Address16):
        command_msg.assertMleMessageContainsTlv(mandatory_tlv)

    # Remaining TLVs are checked against caller-supplied expectations.
    for expectation, optional_tlv in ((route64, mle.Route64),
                                      (network_data, mle.NetworkData),
                                      (address_registration, mle.AddressRegistration),
                                      (active_timestamp, mle.ActiveTimestamp),
                                      (pending_timestamp, mle.PendingTimestamp),
                                      (active_operational_dataset, mle.ActiveOperationalDataset),
                                      (pending_operational_dataset, mle.PendingOperationalDataset)):
        check_mle_optional_tlv(command_msg, expectation, optional_tlv)
def check_coap_optional_tlv(coap_msg, type, tlv):
    """Check a CoAP TLV in `coap_msg` according to the requested policy.

    `type` (kept for caller compatibility despite shadowing the builtin)
    selects contain / not-contain / optional semantics.
    """
    dispatch = {
        CheckType.CONTAIN: coap_msg.assertCoapMessageContainsTlv,
        CheckType.NOT_CONTAIN: coap_msg.assertCoapMessageDoesNotContainTlv,
        CheckType.OPTIONAL: coap_msg.assertCoapMessageContainsOptionalTlv,
    }
    try:
        check = dispatch[type]
    except KeyError:
        raise ValueError("Invalid check type")
    check(tlv)
def check_router_id_cached(node, router_id, cached = True):
    """Verify whether the node's EID-to-RLOC cache references a router.

    node[in]       node under test; must provide get_eidcaches() returning
                   an iterable of (eid, rloc) pairs, rloc as a hex string
    router_id[in]  router id to look for; it occupies the upper bits of
                   the 16-bit RLOC, hence the >> 10 shift
    cached[in]     if True, assert at least one cache entry points at the
                   router; if False, assert that none does
    """
    eidcaches = node.get_eidcaches()
    found = any(router_id == (int(rloc, 16) >> 10) for (_, rloc) in eidcaches)
    if cached:
        assert found
    else:
        # The original used `assert ... is False`, an identity test against
        # the bool singleton; `assert not found` states the intent directly.
        assert not found
| {
"content_hash": "e19b602b9448f18a8cdc342dda9c742e",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 178,
"avg_line_length": 51.73626373626374,
"alnum_prop": 0.7319456244689889,
"repo_name": "pvanhorn/openthread",
"id": "d29d1abd6d630985a5cddb02ec54f7dcdfd7bc2c",
"size": "14124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15855"
},
{
"name": "C",
"bytes": "666083"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3727440"
},
{
"name": "M4",
"bytes": "51448"
},
{
"name": "Makefile",
"bytes": "94727"
},
{
"name": "Python",
"bytes": "1688471"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "38529"
}
],
"symlink_target": ""
} |
"""
This file contains grep processing.
"""
import re
import sys
import mysql.connector
from mysql.connector.constants import ClientFlag
from mysql.utilities.exception import EmptyResultError, FormatError
from mysql.utilities.common.format import print_list
from mysql.utilities.common.ip_parser import parse_connection
from mysql.utilities.common.options import obj2sql
# Actions that ProcessGrep can perform on each matching process.
KILL_QUERY, KILL_CONNECTION, PRINT_PROCESS = range(3)
# Column names of INFORMATION_SCHEMA.PROCESSLIST used in generated SQL.
ID = "ID"
USER = "USER"
HOST = "HOST"
DB = "DB"
COMMAND = "COMMAND"
TIME = "TIME"
STATE = "STATE"
INFO = "INFO"
#
# TODO : Can _spec and similar methods be shared for grep.py?
#
def _spec(info):
"""Create a server specification string from an info structure.
"""
result = "{user}:*@{host}:{port}".format(**info)
if "unix_socket" in info:
result += ":" + info["unix_socket"]
return result
# Skeleton of the SELECT used to find matching processes; {condition}
# receives an optional WHERE clause built by _make_select().
_SELECT_PROC_FRM = """
SELECT
Id, User, Host, Db, Command, Time, State, Info
FROM
INFORMATION_SCHEMA.PROCESSLIST{condition}"""
def _make_select(matches, use_regexp, conditions):
    """Generate a SELECT statement for matching the processes.

    matches[in]     sequence of (field, pattern) pairs to match on
    use_regexp[in]  if True compare with REGEXP, otherwise with LIKE
    conditions[in]  list of pre-built WHERE conditions; the generated match
                    conditions are appended to it (mutated in place)

    Returns string - complete SELECT statement
    """
    comparison = 'REGEXP' if use_regexp else 'LIKE'
    # One condition per requested match (note: mutates `conditions`).
    conditions.extend(
        " {0} {1} {2}".format(field, comparison, obj2sql(pattern))
        for field, pattern in matches)
    if conditions:
        where_clause = "\nWHERE\n" + "\n AND\n".join(conditions)
    else:
        where_clause = ""
    return _SELECT_PROC_FRM.format(condition=where_clause)
# Map single-letter duration suffixes to their length in seconds.
_SECS = {'s': 1, 'm': 60, 'h': 3600, 'd': 24 * 3600, 'w': 7 * 24 * 3600}
# Error message raised when an age specification cannot be parsed.
_INCORRECT_FORMAT_MSG = "'{0}' does not have correct format"
def _make_age_cond(age):
    """Make age condition

    Accept an age description and return an SQL condition on the TIME
    column.  We allow the forms: hh:mm:ss, mm:ss, ss, and suffixed periods
    such as "4h3m", with suffixes d (days), w (weeks), h (hours),
    m (minutes), and s (seconds).  A leading '-' selects processes younger
    than the given age ("<="); otherwise older (">=").

    age[in]            Age (time)

    Returns string - SQL condition

    Raises FormatError if the specification cannot be parsed.
    """
    # Form 1: [+-][[hh:]mm:]ss
    mobj = re.match(r"([+-])?(?:(?:(\d?\d):)?(\d?\d):)?(\d?\d)\Z", age)
    if mobj:
        sign, hrs, mins, secs = mobj.groups()
        if not hrs:
            hrs = 0
        if not mins:
            mins = 0
        seconds = int(secs) + 60 * (int(mins) + 60 * int(hrs))
        oper = "<=" if sign and sign == "-" else ">="
        return ' {0} {1} {2}'.format(TIME, oper, seconds)
    # Form 2: [+-] followed by one or more <number><suffix> periods.
    mobj = re.match(r"([+-])?(\d+[dwhms])+", age)
    if mobj:
        sign = mobj.group(1)
        if sign:
            age = age[1:]
        # Splitting on a capturing group keeps the period tokens at odd
        # indices; the even-indexed elements hold whatever text lies
        # between/around the tokens and must all be empty.
        periods = re.split(r"(\d+[dwhms])", age)
        # BUG FIX: the original tested the leaked comprehension variable
        # `x` (i.e. only the *last* split element), silently accepting
        # malformed inputs such as "4hx3m" (and raising NameError on py3).
        if len(''.join(periods[0::2])) > 0:
            raise FormatError(_INCORRECT_FORMAT_MSG.format(age))
        seconds = 0
        for period in periods[1::2]:
            seconds += int(period[0:-1]) * _SECS[period[-1:]]
        oper = "<=" if sign and sign == "-" else ">="
        return ' {0} {1} {2}'.format(TIME, oper, seconds)
    raise FormatError(_INCORRECT_FORMAT_MSG.format(age))
# Stored-procedure fragment that cursors over the matching processes and
# kills each one; {select} and {kill} are filled in by ProcessGrep.sql().
_KILL_BODY = """
DECLARE kill_done INT;
DECLARE kill_cursor CURSOR FOR
{select}
OPEN kill_cursor;
BEGIN
DECLARE id BIGINT;
DECLARE EXIT HANDLER FOR NOT FOUND SET kill_done = 1;
kill_loop: LOOP
FETCH kill_cursor INTO id;
KILL {kill} id;
END LOOP kill_loop;
END;
CLOSE kill_cursor;"""
# Wrapper that turns the body above into a CREATE PROCEDURE statement.
_KILL_PROCEDURE = """
CREATE PROCEDURE {name} ()
BEGIN{body}
END"""
class ProcessGrep(object):
    """Grep processing

    Searches INFORMATION_SCHEMA.PROCESSLIST for matching processes and,
    depending on the requested actions (PRINT_PROCESS, KILL_QUERY,
    KILL_CONNECTION), prints the matches and/or kills them.
    """
    def __init__(self, matches, actions=None, use_regexp=False, age=None):
        """Constructor

        matches[in]        matches identified
        actions[in]        actions to perform
        use_regexp[in]     if True, use regexp for compare
                           default = False
        age[in]            age in time, if provided
                           default = None
        """
        if actions is None:
            actions = []
        conds = [_make_age_cond(age)] if age else []
        self.__select = _make_select(matches, use_regexp, conds).strip()
        self.__actions = actions
    def sql(self, only_body=False):
        """Generate a SQL command for KILL

        This method generates the KILL <id> SQL command for killing processes.
        It can also generate SQL to kill procedures by recreating them without
        a body (if only_body = True).

        only_body[in]      if True, limit to body of object
                           default = False

        Returns string - SQL statement
        """
        params = {
            'select': "\n ".join(self.__select.split("\n")),
            'kill': 'CONNECTION' if KILL_CONNECTION in self.__actions
            else 'QUERY',
        }
        if KILL_CONNECTION in self.__actions or KILL_QUERY in self.__actions:
            sql = _KILL_BODY.format(**params)
            if not only_body:
                sql = _KILL_PROCEDURE.format(
                    name="kill_processes",
                    body="\n ".join(sql.split("\n"))
                )
            return sql
        else:
            # No kill action requested: just return the plain SELECT.
            return self.__select
    def execute(self, connections, **kwrds):
        """Execute the search for processes, queries, or connections

        This method searches for processes, queriers, or connections to
        either kill or display the matches for one or more servers.

        connections[in]    list of connection parameters
        kwrds[in]          dictionary of options
          output           file stream to display information
                           default = sys.stdout
          connector        connector to use
                           default = mysql.connector
          format           format for display
                           default = GRID
        """
        output = kwrds.get('output', sys.stdout)
        connector = kwrds.get('connector', mysql.connector)
        fmt = kwrds.get('format', "grid")
        charset = kwrds.get('charset', None)
        ssl_opts = kwrds.get('ssl_opts', {})
        headers = ("Connection", "Id", "User", "Host", "Db",
                   "Command", "Time", "State", "Info")
        entries = []
        # Build SQL statement
        for info in connections:
            conn = parse_connection(info)
            if not conn:
                msg = "'%s' is not a valid connection specifier" % (info,)
                raise FormatError(msg)
            if charset:
                conn['charset'] = charset
            info = conn
            if connector == mysql.connector:
                # Add SSL parameters ONLY if they are not None
                add_ssl_flag = False
                if ssl_opts.get('ssl_ca') is not None:
                    info['ssl_ca'] = ssl_opts.get('ssl_ca')
                    add_ssl_flag = True
                if ssl_opts.get('ssl_cert') is not None:
                    info['ssl_cert'] = ssl_opts.get('ssl_cert')
                    add_ssl_flag = True
                if ssl_opts.get('ssl_key') is not None:
                    info['ssl_key'] = ssl_opts.get('ssl_key')
                    add_ssl_flag = True
                if add_ssl_flag:
                    cpy_flags = [ClientFlag.SSL,
                                 ClientFlag.SSL_VERIFY_SERVER_CERT]
                    info['client_flags'] = cpy_flags
            connection = connector.connect(**info)
            if not charset:
                # If no charset provided, get it from the
                # "character_set_client" server variable.
                cursor = connection.cursor()
                cursor.execute("SHOW VARIABLES LIKE 'character_set_client'")
                res = cursor.fetchall()
                connection.set_charset_collation(charset=str(res[0][1]))
                cursor.close()
            cursor = connection.cursor()
            cursor.execute(self.__select)
            print_rows = []
            cols = ["Id", "User", "Host", "db", "Command", "Time",
                    "State", "Info"]
            for row in cursor:
                if (KILL_QUERY in self.__actions) or \
                   (KILL_CONNECTION in self.__actions):
                    print_rows.append(row)
                    cursor.execute("KILL {0}".format(row[0]))
                if PRINT_PROCESS in self.__actions:
                    entries.append(tuple([_spec(info)] + list(row)))
            if print_rows:
                # BUG FIX: was a py2-only `print` statement, which made the
                # whole module unparseable under Python 3 tooling; the
                # single-argument call form prints identically on py2.
                print("# The following KILL commands were executed:")
                print_list(output, fmt, cols, print_rows)
        # If output is None, nothing is printed
        if len(entries) > 0 and output:
            # NOTE(review): sorts on column index 5 of the result tuples --
            # confirm the intended sort key against the `headers` layout.
            entries.sort(key=lambda entry: entry[5])
            print_list(output, fmt, headers, entries)
        elif PRINT_PROCESS in self.__actions:
            raise EmptyResultError("No matches found")
| {
"content_hash": "c6f14916ec745babf3a582aa0f19148a",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 78,
"avg_line_length": 34.2265625,
"alnum_prop": 0.543026706231454,
"repo_name": "scavarda/mysql-dbcompare",
"id": "af3d4fca1cb91f66315455c06d709b9379e19cb2",
"size": "9499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysql-utilities-1.6.0/mysql/utilities/command/proc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7324"
},
{
"name": "Groff",
"bytes": "332329"
},
{
"name": "Python",
"bytes": "3103169"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import numpy as np
import pandas as pd
import networkx as nx
from os.path import isdir, dirname, abspath
from os import getcwd
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding, stdout
from IPython.core.display import HTML, display
from collections import OrderedDict
import datetime
import pytz
from nilmtk.datastore import HDFDataStore, CSVDataStore
def show_versions():
    """Print versions of various dependencies, the platform and the date."""
    output = OrderedDict()
    output["Date"] = str(datetime.datetime.now())
    import sys
    import platform
    import importlib
    output["Platform"] = str(platform.platform())
    system_information = sys.version_info
    output["System version"] = "{}.{}".format(system_information.major,
                                              system_information.minor)
    PACKAGES = [
        "nilmtk", "nilm_metadata", "numpy", "matplotlib", "pandas", "sklearn",
        "hmmlearn"]
    for package_name in PACKAGES:
        key = package_name + " version"
        try:
            # importlib.import_module replaces the original
            # exec("import " + ...) / eval(...) pair: same effect, no
            # dynamic code execution.
            package = importlib.import_module(package_name)
        except ImportError:
            output[key] = "Not found"
        else:
            # getattr with a default: packages without __version__ no
            # longer crash the report.
            output[key] = getattr(package, "__version__", "unknown")
    try:
        print(pd.show_versions())
    except Exception:
        # pd.show_versions() can fail on some pandas versions; the report
        # below is still useful, so swallow the error (but not SystemExit
        # or KeyboardInterrupt, as the original bare `except:` did).
        pass
    else:
        print("")
    # .items() instead of py2-only .iteritems(); identical iteration.
    for k, v in output.items():
        print("{}: {}".format(k, v))
def timedelta64_to_secs(timedelta):
    """Convert `timedelta` to seconds.

    Parameters
    ----------
    timedelta : np.timedelta64 scalar, or array-like of np.timedelta64

    Returns
    -------
    float or np.ndarray of float : seconds.  Empty input yields an
    empty array.
    """
    # np.asarray generalizes the original array-only code: scalar
    # np.timedelta64 values (which have no len()) now work too, while
    # arrays/lists behave exactly as before.
    timedelta = np.asarray(timedelta)
    if timedelta.size == 0:
        return np.array([])
    return timedelta / np.timedelta64(1, 's')
def tree_root(graph):
    """Returns the object that is the root of the tree.

    Parameters
    ----------
    graph : networkx.Graph

    Raises
    ------
    RuntimeError : if the graph has zero roots or more than one root.
    """
    # A root is any node with no incoming edges.
    # (from http://stackoverflow.com/a/4123177/732596)
    assert isinstance(graph, nx.Graph)
    candidates = [node, in_degree] = None  # placeholder removed below
def nodes_adjacent_to_root(graph):
    """Return the direct successors (children) of the tree's root node."""
    return graph.successors(tree_root(graph))
def index_of_column_name(df, name):
    """Return the positional index of column `name` in `df`.

    Raises KeyError if no column matches.
    """
    positions = (i for i, column in enumerate(df.columns) if column == name)
    try:
        return next(positions)
    except StopIteration:
        raise KeyError(name)
def find_nearest(known_array, test_array):
    """Find closest value in `known_array` for each element in `test_array`.

    Parameters
    ----------
    known_array : numpy array
        consisting of scalar values only; shape: (m, 1)
    test_array : numpy array
        consisting of scalar values only; shape: (n, 1)

    Returns
    -------
    indices : numpy array; shape: (n, 1)
        For each value in `test_array` finds the index of the closest value
        in `known_array`.
    residuals : numpy array; shape: (n, 1)
        For each value in `test_array` finds the difference from the closest
        value in `known_array`.
    """
    # Work on a sorted copy of the known values, remembering the
    # permutation so results can be mapped back to original positions.
    # (from http://stackoverflow.com/a/20785149/732596)
    order = np.argsort(known_array)
    sorted_known = known_array[order]
    insert_pos = np.searchsorted(sorted_known, test_array)
    left = np.clip(insert_pos - 1, 0, len(sorted_known) - 1)
    right = np.clip(insert_pos, 0, len(sorted_known) - 1)
    dist_right = sorted_known[right] - test_array
    dist_left = test_array - sorted_known[left]
    # Ties (equal distance) resolve to the right-hand neighbour, exactly
    # as in the original diff1 <= diff2 test.
    nearest_sorted = np.where(dist_right <= dist_left, right, left)
    indices = order[nearest_sorted]
    residuals = test_array - known_array[indices]
    return indices, residuals
def container_to_string(container, sep='_'):
    """Join `container`'s elements into one string separated by `sep`.

    Strings pass through unchanged; non-iterable values are str()'d.
    """
    if isinstance(container, str):
        return container
    try:
        return sep.join(str(element) for element in container)
    except TypeError:
        # Not iterable at all -- fall back to its plain string form.
        return str(container)
def simplest_type_for(values):
    """Collapse `values` to its simplest representation.

    Empty -> None; a single element -> that element; otherwise a tuple.
    """
    n_values = len(values)
    if n_values == 0:
        return None
    if n_values == 1:
        return next(iter(values))
    return tuple(values)
def flatten_2d_list(list2d):
    """Flatten one level of nesting, treating strings as atomic items."""
    flat = []
    for item in list2d:
        # Strings are iterable but must be kept whole.
        if isinstance(item, basestring):
            flat.append(item)
            continue
        try:
            len(item)
        except TypeError:
            flat.append(item)   # scalar (no length)
        else:
            flat.extend(item)   # nested sequence
    return flat
def get_index(data):
    """
    Parameters
    ----------
    data : pandas.DataFrame or Series or DatetimeIndex

    Returns
    -------
    index : the index for the DataFrame or Series
    """
    if isinstance(data, pd.DatetimeIndex):
        return data
    if isinstance(data, (pd.DataFrame, pd.Series)):
        return data.index
    raise TypeError('wrong type for `data`.')
def convert_to_timestamp(t):
    """Coerce `t` to a pandas Timestamp; None passes through unchanged.

    Parameters
    ----------
    t : str or pd.Timestamp or datetime or None

    Returns
    -------
    pd.Timestamp or None
    """
    if t is None:
        return None
    return pd.Timestamp(t)
def get_module_directory():
    """Return the directory containing this module.

    Falls back through several strategies (inspect frame, __file__, the
    source file of a local lambda, and finally the CWD) because no single
    one works in every environment.
    Taken from http://stackoverflow.com/a/6098238/732596
    """
    path_to_this_file = dirname(getfile(currentframe()))
    if not isdir(path_to_this_file):
        encoding = getfilesystemencoding()
        path_to_this_file = dirname(unicode(__file__, encoding))
    if not isdir(path_to_this_file):
        # BUG FIX: the original computed this fallback but never assigned
        # the result, making the statement a no-op.
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
    if not isdir(path_to_this_file):
        path_to_this_file = getcwd()
    assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
    return path_to_this_file
def dict_to_html(dictionary):
    """Render a (possibly nested) dict as an HTML unordered list."""
    def format_string(value):
        # Turn URL-looking strings into links; render everything else
        # verbatim; swallow undisplayable values.
        try:
            if isinstance(value, basestring) and 'http' in value:
                return '<a href="{url}">{url}</a>'.format(url=value)
            return '{}'.format(value)
        except UnicodeEncodeError:
            return ''
    pieces = ['<ul>']
    for key, value in dictionary.iteritems():
        pieces.append('<li><strong>{}</strong>: '.format(key))
        if isinstance(value, list):
            pieces.append('<ul>')
            for item in value:
                pieces.append('<li>{}</li>'.format(format_string(item)))
            pieces.append('</ul>')
        elif isinstance(value, dict):
            # Recurse for nested dictionaries.
            pieces.append(dict_to_html(value))
        else:
            pieces.append(format_string(value))
        pieces.append('</li>')
    pieces.append('</ul>')
    return ''.join(pieces)
def print_dict(dictionary):
    """Render `dictionary` as HTML and display it via IPython rich output."""
    display(HTML(dict_to_html(dictionary)))
def offset_alias_to_seconds(alias):
    """Seconds for each period length described by the offset `alias`."""
    # Two consecutive timestamps at the given frequency; their difference
    # is exactly one period.
    dr = pd.date_range('00:00', periods=2, freq=alias)
    delta = dr[1] - dr[0]
    return delta.total_seconds()
def check_directory_exists(d):
    """Raise IOError unless `d` is an existing directory."""
    if isdir(d):
        return
    raise IOError("Directory '{}' does not exist.".format(d))
def tz_localize_naive(timestamp, tz):
    """Convert `timestamp` to timezone `tz`, localizing naive values as UTC.

    Returns the input unchanged when `tz` is None, and NaT for null input.
    """
    if tz is None:
        return timestamp
    if timestamp is None or pd.isnull(timestamp):
        return pd.NaT
    ts = pd.Timestamp(timestamp)
    if timestamp_is_naive(ts):
        # Naive timestamps are interpreted as UTC before conversion.
        ts = ts.tz_localize('UTC')
    return ts.tz_convert(tz)
def get_tz(df):
    """Return the timezone of `df`'s index, or None when the index type
    carries no tz attribute."""
    return getattr(df.index, 'tz', None)
def timestamp_is_naive(timestamp):
    """
    Parameters
    ----------
    timestamp : pd.Timestamp or datetime.datetime

    Returns
    -------
    True if `timestamp` is naive (i.e. if it does not have a
    timezone associated with it).  See:
    https://docs.python.org/2/library/datetime.html#available-types
    """
    tzinfo = timestamp.tzinfo
    return tzinfo is None or tzinfo.utcoffset(timestamp) is None
def get_datastore(filename, format, mode='a'):
    """Open a data store of the requested format.

    Parameters
    ----------
    filename : string
    format : 'CSV' or 'HDF'
    mode : 'a' (append) or 'w' (write), optional

    Returns
    -------
    datastore : HDFDataStore or CSVDataStore instance
        (the original docstring wrongly claimed a metadata dict)

    Raises
    ------
    ValueError : if `filename` is None or `format` is not recognised.
    """
    if filename is None:
        # BUG FIX: the original constructed this ValueError but never
        # raised it, silently returning None instead.
        raise ValueError('filename is None')
    if format == 'HDF':
        return HDFDataStore(filename, mode)
    elif format == 'CSV':
        return CSVDataStore(filename)
    else:
        raise ValueError('format not recognised')
def normalise_timestamp(timestamp, freq):
    """Returns the nearest Timestamp to `timestamp` which would be
    in the set of timestamps returned by pd.DataFrame.resample(freq=freq)
    """
    # Resample a single-element NaN series and read off the first bin edge.
    # NOTE(review): relies on the legacy pandas API where .resample()
    # immediately returns an object exposing .index -- confirm against the
    # pandas version pinned by this project.
    timestamp = pd.Timestamp(timestamp)
    series = pd.Series(np.NaN, index=[timestamp])
    resampled = series.resample(freq)
    return resampled.index[0]
def print_on_line(*strings):
    """Print `strings` without a trailing newline, flushing stdout so the
    text appears immediately (useful for incremental progress output)."""
    print(*strings, end="")
    stdout.flush()
def append_or_extend_list(lst, value):
    """Add `value` to `lst` in place: extend for lists, append otherwise.

    None is silently ignored.
    """
    if value is None:
        return
    adder = lst.extend if isinstance(value, list) else lst.append
    adder(value)
def convert_to_list(list_like):
    """Return list(list_like), or an empty list for None."""
    if list_like is None:
        return []
    return list(list_like)
def most_common(lst):
    """Returns the most common entry in lst.

    Ties are broken arbitrarily (as before).  Raises IndexError for an
    empty input (same as the original, which failed on `index[-1]`).
    """
    from collections import Counter
    # Counter.most_common is the stdlib idiom and O(n); the original
    # built a pandas Series and called Series.sort(), an O(n log n)
    # method that modern pandas has removed.
    return Counter(lst).most_common(1)[0][0]
def capitalise_first_letter(string):
    """Return `string` with its first character upper-cased.

    Unlike str.capitalize(), the rest of the string is left untouched.
    An empty string is returned unchanged (the original raised IndexError
    on string[0]).
    """
    return string[:1].upper() + string[1:]
def capitalise_index(index):
    """Return `index` as a list with each label's first letter capitalised."""
    return [capitalise_first_letter(label) for label in index]
def capitalise_legend(ax):
    """Re-draw `ax`'s legend with capitalised labels; returns `ax`."""
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles, capitalise_index(labels))
    return ax
def safe_resample(data, **resample_kwargs):
    """Resample `data`, working around pandas' AmbiguousTimeError.

    If resampling trips over a DST-ambiguous local time, round-trip
    through UTC: convert to UTC, resample there, convert back to the
    original timezone.
    """
    try:
        data = data.resample(**resample_kwargs)
    except pytz.AmbiguousTimeError:
        # Work-around for
        # https://github.com/pydata/pandas/issues/10117
        tz = data.index.tz.zone
        data = data.tz_convert('UTC')
        data = data.resample(**resample_kwargs)
        data = data.tz_convert(tz)
    return data
| {
"content_hash": "f33da08144f8c68f2512a6085ab4d2ad",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 78,
"avg_line_length": 26.186700767263428,
"alnum_prop": 0.604160562554937,
"repo_name": "josemao/nilmtk",
"id": "378bb53fa80c9e40faf0bada0365f66b11d09348",
"size": "10239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nilmtk/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2735"
},
{
"name": "Jupyter Notebook",
"bytes": "16159926"
},
{
"name": "Python",
"bytes": "470818"
}
],
"symlink_target": ""
} |
from django.conf import settings
from rest_framework.reverse import reverse
from waldur_mastermind.common import utils as common_utils
from waldur_mastermind.support import views as support_views
def create_issue(offering_request):
    """File a support issue asking staff to review/activate a public offering.

    No-op when the support backend is disabled in settings.
    """
    if not settings.WALDUR_SUPPORT['ENABLED']:
        return
    requester = offering_request.requested_by
    offering = offering_request.offering
    description = (
        'Please review and activate offering {offering_name} ({offering_uuid}). \n'
        'Requestor: {user_name} / {user_uuid}. \n'
        'Service provider: {customer_name} / {customer_uuid}'.format(
            offering_name=offering.name,
            offering_uuid=offering.uuid,
            user_name=requester.full_name,
            user_uuid=requester.uuid,
            customer_name=offering.customer.name,
            customer_uuid=offering.customer.uuid.hex,
        )
    )
    payload = {
        'summary': 'Request publishing of public offering',
        'caller': reverse('user-detail', kwargs={'uuid': requester.uuid.hex}),
        'description': description,
        'type': settings.WALDUR_SUPPORT['DEFAULT_OFFERING_ISSUE_TYPE'],
    }
    common_utils.create_request(
        support_views.IssueViewSet.as_view({'post': 'create'}),
        requester,
        payload,
    )
| {
"content_hash": "63b4e59babf700a7f7fb416d0740c011",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 98,
"avg_line_length": 40.193548387096776,
"alnum_prop": 0.6669341894060995,
"repo_name": "opennode/waldur-mastermind",
"id": "f74720fdde2f438a8c3b24a763b3944b4bd85994",
"size": "1246",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/marketplace_flows/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
} |
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
import time
from decimal import Decimal
from itertools import product
import test_framework.messages
from test_framework.avatools import create_coinbase_stakes
from test_framework.key import ECKey
from test_framework.messages import NODE_NETWORK
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
p2p_port,
)
from test_framework.wallet_util import bytes_to_wif
def assert_net_servicesnames(servicesflag, servicenames):
    """Utility that checks if all flags are correctly decoded in
    `getpeerinfo` and `getnetworkinfo`.

    :param servicesflag: The services as an integer.
    :param servicenames: The list of decoded services names, as strings.
    """
    # Rebuild the bitmask from the decoded names and compare it with the
    # raw flag value reported by the node.
    rebuilt = 0
    for name in servicenames:
        rebuilt |= getattr(test_framework.messages, 'NODE_' + name)
    assert rebuilt == servicesflag
class NetTest(BitcoinTestFramework):
    def set_test_params(self):
        # Start from a fresh chain so block/address state is predictable.
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Distinct -minrelaytxfee values (10 vs 5) let test_getpeerinfo
        # verify that each side advertises its own minfeefilter.
        self.extra_args = [["-avalanche=1",
                            "-avaproofstakeutxodustthreshold=1000000",
                            "-avaproofstakeutxoconfirmations=1",
                            "-minrelaytxfee=10"],
                           ["-avalanche=1",
                            "-avaproofstakeutxodustthreshold=1000000",
                            "-avaproofstakeutxoconfirmations=1",
                            "-minrelaytxfee=5"]]
        self.supports_cli = False
    def run_test(self):
        # Get out of IBD for the minfeefilter and getpeerinfo tests.
        self.generate(self.nodes[0], 101)
        # Connect nodes both ways.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)
        self.sync_all()
        # Each sub-test below assumes the bidirectional 0<->1 topology
        # established above.
        self.test_connection_count()
        self.test_getpeerinfo()
        self.test_getnettotals()
        self.test_getnetworkinfo()
        self.test_getaddednodeinfo()
        self.test_service_flags()
        self.test_getnodeaddresses()
        self.test_addpeeraddress()
    def test_connection_count(self):
        self.log.info("Test getconnectioncount")
        # After using `connect_nodes` to connect nodes 0 and 1 to each other.
        # One inbound plus one outbound connection from node 0's viewpoint.
        assert_equal(self.nodes[0].getconnectioncount(), 2)
    def test_getnettotals(self):
        self.log.info("Test getnettotals")
        # Test getnettotals and getpeerinfo by doing a ping. The bytes
        # sent/received should increase by at least the size of one ping (32
        # bytes) and one pong (32 bytes).
        net_totals_before = self.nodes[0].getnettotals()
        peer_info_before = self.nodes[0].getpeerinfo()
        self.nodes[0].ping()
        # First check the node-wide byte counters...
        self.wait_until(
            lambda:
            self.nodes[0].getnettotals()['totalbytessent']
            >= net_totals_before['totalbytessent'] + 32 * 2,
            timeout=10)
        self.wait_until(
            lambda:
            self.nodes[0].getnettotals()['totalbytesrecv']
            >= net_totals_before['totalbytesrecv'] + 32 * 2,
            timeout=10)
        # ...then the per-peer, per-message-type counters.
        for peer_before in peer_info_before:
            # Re-fetch this peer's current stats (by id) on every poll.
            def peer_after():
                return next(
                    p for p in self.nodes[0].getpeerinfo()
                    if p['id'] == peer_before['id']
                )
            self.wait_until(
                lambda:
                peer_after()['bytesrecv_per_msg'].get('pong', 0)
                >= peer_before['bytesrecv_per_msg'].get('pong', 0) + 32,
                timeout=10
            )
            self.wait_until(
                lambda:
                peer_after()['bytessent_per_msg'].get('ping', 0)
                >= peer_before['bytessent_per_msg'].get('ping', 0) + 32,
                timeout=10)
    def test_getnetworkinfo(self):
        self.log.info("Test getnetworkinfo")
        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)
        # Disabling the network must be logged and drop every connection.
        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: false\n']):
            self.nodes[0].setnetworkactive(state=False)
        assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
        # Wait a bit for all sockets to close
        self.wait_until(lambda: self.nodes[0].getnetworkinfo()[
            'connections'] == 0, timeout=3)
        with self.nodes[0].assert_debug_log(expected_msgs=['SetNetworkActive: true\n']):
            self.nodes[0].setnetworkactive(state=True)
        # Connect nodes both ways.
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 0)
        info = self.nodes[0].getnetworkinfo()
        assert_equal(info['networkactive'], True)
        assert_equal(info['connections'], 2)
        assert_equal(info['connections_in'], 1)
        assert_equal(info['connections_out'], 1)
        # check the `servicesnames` field
        network_info = [node.getnetworkinfo() for node in self.nodes]
        for info in network_info:
            # localservices is a hex string; 0x10 == base 16.
            assert_net_servicesnames(int(info["localservices"], 0x10),
                                     info["localservicesnames"])
        # Check dynamically generated networks list in getnetworkinfo help
        # output.
        assert (
            "(ipv4, ipv6, onion, i2p)" in self.nodes[0].help("getnetworkinfo")
        )
    def test_getaddednodeinfo(self):
        self.log.info("Test getaddednodeinfo")
        # getaddednodeinfo covers only nodes added via `addnode`, so it
        # starts out empty even though peer connections exist.
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # add a node (node2) to node0
        ip_port = "127.0.0.1:{}".format(p2p_port(2))
        self.nodes[0].addnode(node=ip_port, command='add')
        # check that the node has indeed been added
        added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
        assert_equal(len(added_nodes), 1)
        assert_equal(added_nodes[0]['addednode'], ip_port)
        # check that node cannot be added again
        assert_raises_rpc_error(-23,
                                "Node already added",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='add')
        # check that node can be removed
        self.nodes[0].addnode(node=ip_port, command='remove')
        assert_equal(self.nodes[0].getaddednodeinfo(), [])
        # check that trying to remove the node again returns an error
        assert_raises_rpc_error(-24,
                                "Node could not be removed",
                                self.nodes[0].addnode,
                                node=ip_port,
                                command='remove')
        # check that a non-existent node returns an error
        assert_raises_rpc_error(-24, "Node has not been added",
                                self.nodes[0].getaddednodeinfo, '1.1.1.1')
    def test_getpeerinfo(self):
        self.log.info("Test getpeerinfo")
        # Create a few getpeerinfo last_block/last_transaction/last_proof
        # values.
        if self.is_wallet_compiled():
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1000000)
        tip = self.generate(self.nodes[1], 1)[0]
        self.sync_all()
        # Publish an avalanche proof so last_proof gets a recent timestamp.
        stake = create_coinbase_stakes(
            self.nodes[1], [tip], self.nodes[1].get_deterministic_priv_key().key)
        privkey = ECKey()
        privkey.generate()
        proof = self.nodes[1].buildavalancheproof(
            42, 2000000000, bytes_to_wif(privkey.get_bytes()), stake)
        self.nodes[1].sendavalancheproof(proof)
        self.sync_proofs()
        time_now = int(time.time())
        peer_info = [x.getpeerinfo() for x in self.nodes]
        # Verify last_block, last_transaction and last_proof keys/values.
        for node, peer, field in product(range(self.num_nodes), range(2), [
                'last_block', 'last_transaction', 'last_proof']):
            assert field in peer_info[node][peer].keys()
            if peer_info[node][peer][field] != 0:
                assert_approx(peer_info[node][peer][field], time_now, vspan=60)
        # check both sides of bidirectional connection between nodes
        # the address bound to on one side will be the source address for the
        # other node
        assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
        assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
        # minfeefilter reflects the peer's -minrelaytxfee (see extra_args).
        assert_equal(peer_info[0][0]['minfeefilter'], Decimal("5.00"))
        assert_equal(peer_info[1][0]['minfeefilter'], Decimal("10.00"))
        # check the `servicesnames` field
        for info in peer_info:
            assert_net_servicesnames(int(info[0]["services"], 0x10),
                                     info[0]["servicesnames"])
        assert_equal(peer_info[0][0]['connection_type'], 'inbound')
        assert_equal(peer_info[0][1]['connection_type'], 'manual')
        assert_equal(peer_info[1][0]['connection_type'], 'manual')
        assert_equal(peer_info[1][1]['connection_type'], 'inbound')
        # Check dynamically generated networks list in getpeerinfo help output.
        assert (
            "(ipv4, ipv6, onion, i2p, not_publicly_routable)" in
            self.nodes[0].help("getpeerinfo")
        )
        # Node state fields
        for node, peer, field in product(range(self.num_nodes), range(2),
                                         ['startingheight', 'synced_headers', 'synced_blocks', 'inflight']):
            assert field in peer_info[node][peer].keys()
    def test_service_flags(self):
        self.log.info("Test service flags")
        # Advertise two undefined service bits (2^5 and 2^63) and check
        # that they are rendered as UNKNOWN[...] placeholders.
        self.nodes[0].add_p2p_connection(
            P2PInterface(), services=(
                1 << 5) | (
                1 << 63))
        assert_equal(['UNKNOWN[2^5]', 'UNKNOWN[2^63]'],
                     self.nodes[0].getpeerinfo()[-1]['servicesnames'])
        self.nodes[0].disconnect_p2ps()
    def test_getnodeaddresses(self):
        self.log.info("Test getnodeaddresses")
        self.nodes[0].add_p2p_connection(P2PInterface())
        # Add an IPv6 address to the address manager.
        ipv6_addr = "1233:3432:2434:2343:3234:2345:6546:4534"
        self.nodes[0].addpeeraddress(address=ipv6_addr, port=8333)
        # Add 10,000 IPv4 addresses to the address manager. Due to the way bucket
        # and bucket positions are calculated, some of these addresses will
        # collide.
        imported_addrs = []
        for i in range(10000):
            # Spread i over the first two octets to get distinct addresses.
            first_octet = i >> 8
            second_octet = i % 256
            a = f"{first_octet}.{second_octet}.1.1"
            imported_addrs.append(a)
            self.nodes[0].addpeeraddress(a, 8333)
        # Fetch the addresses via the RPC and test the results.
        # default count is 1
        assert_equal(len(self.nodes[0].getnodeaddresses()), 1)
        assert_equal(len(self.nodes[0].getnodeaddresses(count=2)), 2)
        assert_equal(
            len(self.nodes[0].getnodeaddresses(network="ipv4", count=8)), 8)
        # Maximum possible addresses in AddrMan is 10000. The actual number will
        # usually be less due to bucket and bucket position collisions.
        # count=0 requests all known addresses for the network.
        node_addresses = self.nodes[0].getnodeaddresses(0, "ipv4")
        assert_greater_than(len(node_addresses), 5000)
        assert_greater_than(10000, len(node_addresses))
        for a in node_addresses:
            assert_greater_than(a["time"], 1527811200) # 1st June 2018
            assert_equal(a["services"], NODE_NETWORK)
            assert a["address"] in imported_addrs
            assert_equal(a["port"], 8333)
            assert_equal(a["network"], "ipv4")
        # Test the IPv6 address.
        res = self.nodes[0].getnodeaddresses(0, "ipv6")
        assert_equal(len(res), 1)
        assert_equal(res[0]["address"], ipv6_addr)
        assert_equal(res[0]["network"], "ipv6")
        assert_equal(res[0]["port"], 8333)
        assert_equal(res[0]["services"], NODE_NETWORK)
        # Test for the absence of onion and I2P addresses.
        for network in ["onion", "i2p"]:
            assert_equal(self.nodes[0].getnodeaddresses(0, network), [])
        # Test invalid arguments.
        assert_raises_rpc_error(-8, "Address count out of range",
                                self.nodes[0].getnodeaddresses, -1)
        assert_raises_rpc_error(-8, "Network not recognized: Foo",
                                self.nodes[0].getnodeaddresses, 1, "Foo")
    def test_addpeeraddress(self):
        """RPC addpeeraddress sets the source address equal to the destination address.
        If an address with the same /16 as an existing new entry is passed, it will be
        placed in the same new bucket and have a 1/64 chance of the bucket positions
        colliding (depending on the value of nKey in the addrman), in which case the
        new address won't be added. The probability of collision can be reduced to
        1/2^16 = 1/65536 by using an address from a different /16. We avoid this here
        by first testing adding a tried table entry before testing adding a new table one.
        """
        self.log.info("Test addpeeraddress")
        # -checkaddrman=1 makes the node run addrman consistency checks on
        # every addrman operation, producing the "CheckAddrman: ..." log
        # lines asserted below.
        self.restart_node(1, ["-checkaddrman=1"])
        node = self.nodes[1]
        self.log.debug("Test that addpeerinfo is a hidden RPC")
        # It is hidden from general help, but its detailed help may be called
        # directly.
        assert "addpeerinfo" not in node.help()
        assert "addpeerinfo" in node.help("addpeerinfo")
        self.log.debug("Test that adding an empty address fails")
        assert_equal(
            node.addpeeraddress(
                address="", port=8333), {
                "success": False})
        assert_equal(node.getnodeaddresses(count=0), [])
        self.log.debug(
            "Test that adding a valid address to the tried table succeeds")
        assert_equal(
            node.addpeeraddress(
                address="1.2.3.4", tried=True, port=8333), {
                "success": True})
        with node.assert_debug_log(expected_msgs=["CheckAddrman: new 0, tried 1, total 1 started"]):
            # getnodeaddresses re-runs the addrman checks
            addrs = node.getnodeaddresses(count=0)
            assert_equal(len(addrs), 1)
            assert_equal(addrs[0]["address"], "1.2.3.4")
            assert_equal(addrs[0]["port"], 8333)
        self.log.debug(
            "Test that adding an already-present tried address to the new and tried tables fails")
        # The same address may live in at most one table at a time.
        for value in [True, False]:
            assert_equal(
                node.addpeeraddress(
                    address="1.2.3.4", tried=value, port=8333), {
                    "success": False})
        assert_equal(len(node.getnodeaddresses(count=0)), 1)
        self.log.debug(
            "Test that adding a second address, this time to the new table, succeeds")
        assert_equal(
            node.addpeeraddress(
                address="2.0.0.0", port=8333), {
                "success": True})
        with node.assert_debug_log(expected_msgs=["CheckAddrman: new 1, tried 1, total 2 started"]):
            # getnodeaddresses re-runs the addrman checks
            addrs = node.getnodeaddresses(count=0)
            assert_equal(len(addrs), 2)
# Standard functional-test entry point: run the suite when invoked directly.
if __name__ == '__main__':
    NetTest().main()
| {
"content_hash": "2e73315e7a8bec29f0c1c59bd869d967",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 108,
"avg_line_length": 43.04683195592286,
"alnum_prop": 0.5849865608601049,
"repo_name": "Bitcoin-ABC/bitcoin-abc",
"id": "e070d875f984a7418c4997a540ed767d782eb169",
"size": "15835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/rpc_net.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1160721"
},
{
"name": "C++",
"bytes": "9817660"
},
{
"name": "CMake",
"bytes": "195193"
},
{
"name": "CSS",
"bytes": "4284"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "HTML",
"bytes": "25754"
},
{
"name": "Java",
"bytes": "41238"
},
{
"name": "JavaScript",
"bytes": "2366459"
},
{
"name": "Kotlin",
"bytes": "3712"
},
{
"name": "M4",
"bytes": "31132"
},
{
"name": "Makefile",
"bytes": "100617"
},
{
"name": "Objective-C++",
"bytes": "5811"
},
{
"name": "PHP",
"bytes": "94504"
},
{
"name": "Perl",
"bytes": "4551"
},
{
"name": "PowerShell",
"bytes": "2277"
},
{
"name": "Python",
"bytes": "2706993"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Ruby",
"bytes": "21108"
},
{
"name": "Rust",
"bytes": "54953"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "167526"
},
{
"name": "TypeScript",
"bytes": "66320"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the struct_bench distribution.
setup(
    name='struct_bench',
    version='0.1',
    description='TBD',
    long_description='TBD',
    author='androm3da',
    author_email='foo@example.com',
    url='http://example.com/',
    license='MIT',
    packages=find_packages(),
    install_requires=['cffi >= 1.1'],
    # NOTE(review): an optional 'test' extra (tox) was previously sketched
    # here; re-introduce via extras_require={'test': ['tox']} if needed.
)
| {
"content_hash": "181bc1c6db570bdcc46f6e2c0e68f679",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 43,
"avg_line_length": 20.238095238095237,
"alnum_prop": 0.5294117647058824,
"repo_name": "androm3da/struct_bench",
"id": "9ca773482d6271168c06fe3c4c2e75c36aa19cae",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2762"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
from os.path import join as pjoin, abspath, dirname, pardir
# Filesystem layout: PROJ_ROOT is the parent directory of this settings file.
PROJ_ROOT = abspath(pjoin(dirname(__file__), pardir))
DATA_ROOT = pjoin(PROJ_ROOT, 'data')
# sorl-thumbnail settings for the test run: cache thumbnails under
# test/cache/ and raise errors instead of failing silently.
THUMBNAIL_PREFIX = 'test/cache/'
THUMBNAIL_DEBUG = True
THUMBNAIL_LOG_HANDLER = {
    'class': 'sorl.thumbnail.log.ThumbnailLogHandler',
    'level': 'ERROR',
}
# Swap in test doubles for the key-value store and file storage backends.
THUMBNAIL_KVSTORE = 'thumbnail_tests.kvstore.TestKVStore'
THUMBNAIL_STORAGE = 'thumbnail_tests.storage.TestStorage'
DEFAULT_FILE_STORAGE = 'thumbnail_tests.storage.TestStorage'
ADMINS = (
    ('Sorl', 'thumbnail@sorl.net'),
)
# SQLite database used by the test suite.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'thumbnail_tests',
    }
}
MEDIA_ROOT = pjoin(PROJ_ROOT, 'media')
MEDIA_URL = '/media/'
ROOT_URLCONF = 'thumbnail_tests.urls'
INSTALLED_APPS = (
    'thumbnail',
    'thumbnail_tests',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.request",
)
| {
"content_hash": "df86f9d59008679b7be1081ef731f3c7",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 60,
"avg_line_length": 26.558823529411764,
"alnum_prop": 0.6854928017718716,
"repo_name": "makinacorpus/sorl-thumbnail",
"id": "e8468cce56718127eb37cf1a5e96933af231a96d",
"size": "903",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/settings/default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "94750"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
__all__ = ["__version__", "version_tuple"]
try:
    # _version.py is generated at build time (e.g. by setuptools_scm) and
    # carries the real version information.
    from ._version import version as __version__, version_tuple
except ImportError:  # pragma: no cover
    # Broken/partial installation where _version.py was never generated:
    # fall back to a sentinel.  "unknown" only works because callers do a
    # poor man's string comparison on the version.
    __version__ = "unknown"
    version_tuple = (0, 0, "unknown")  # type:ignore[assignment]
| {
"content_hash": "c7fdaba0371323e27e97c4ae307f02a2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 64,
"avg_line_length": 39.55555555555556,
"alnum_prop": 0.6601123595505618,
"repo_name": "pytest-dev/pytest",
"id": "8a406c5c7512bb928f1909cf1f43461cb3efb64c",
"size": "356",
"binary": false,
"copies": "10",
"ref": "refs/heads/main",
"path": "src/_pytest/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2748374"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration that creates the licenses_license table."""

    def forwards(self, orm):
        # Adding model 'License'
        db.create_table(u'licenses_license', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
            ('creative_commons', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('cc_attribution', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('cc_noncommercial', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('cc_no_deriv', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('cc_share_alike', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('publishable', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'licenses', ['License'])

    def backwards(self, orm):
        # Deleting model 'License'
        db.delete_table(u'licenses_license')

    # Frozen ORM state used by South; keep byte-exact, do not edit by hand.
    models = {
        u'licenses.license': {
            'Meta': {'object_name': 'License'},
            'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'cc_attribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_no_deriv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_noncommercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cc_share_alike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'creative_commons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'publishable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['licenses']
"content_hash": "5d95e9e223d928280b47127325c5dfbf",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 105,
"avg_line_length": 54.170212765957444,
"alnum_prop": 0.6076197957580518,
"repo_name": "seanbell/opensurfaces",
"id": "c1c002392bb17847098deffbc27235dbc2e8c835",
"size": "2570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/licenses/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2134399"
},
{
"name": "C++",
"bytes": "944309"
},
{
"name": "CMake",
"bytes": "1314"
},
{
"name": "CSS",
"bytes": "332038"
},
{
"name": "CoffeeScript",
"bytes": "245856"
},
{
"name": "HTML",
"bytes": "286807"
},
{
"name": "JavaScript",
"bytes": "395211"
},
{
"name": "Lua",
"bytes": "4605"
},
{
"name": "M",
"bytes": "43"
},
{
"name": "Makefile",
"bytes": "9862"
},
{
"name": "Matlab",
"bytes": "69652"
},
{
"name": "Objective-C",
"bytes": "547"
},
{
"name": "Python",
"bytes": "2161982"
},
{
"name": "Shell",
"bytes": "54309"
},
{
"name": "TeX",
"bytes": "35639"
}
],
"symlink_target": ""
} |
import unittest
class TestStringMethods(unittest.TestCase):
    """Smoke tests for a handful of built-in str methods."""

    def test_upper(self):
        # upper() folds every cased character to upper case.
        self.assertEqual('FOO', 'foo'.upper())

    def test_isupper(self):
        # isupper() is True only when all cased characters are upper case.
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        sentence = 'hello world'
        self.assertEqual(['hello', 'world'], sentence.split())
        # split() rejects a non-string separator outright.
        with self.assertRaises(TypeError):
            sentence.split(2)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
"content_hash": "40892adccb6bc4f9db8fca3089a02044",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 25.85,
"alnum_prop": 0.6286266924564797,
"repo_name": "twareproj/tware",
"id": "0829d2065c4b854cc0936420641f47a6e3bf109c",
"size": "535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "refind/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20508"
},
{
"name": "C++",
"bytes": "623701"
},
{
"name": "CMake",
"bytes": "3790"
},
{
"name": "CSS",
"bytes": "4371"
},
{
"name": "HTML",
"bytes": "7491"
},
{
"name": "Java",
"bytes": "46899"
},
{
"name": "JavaScript",
"bytes": "1285"
},
{
"name": "Julia",
"bytes": "10578"
},
{
"name": "Python",
"bytes": "102461"
},
{
"name": "R",
"bytes": "5040"
},
{
"name": "Scala",
"bytes": "29844"
},
{
"name": "Shell",
"bytes": "21219"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
import datetime
import sys
import unittest
from django.contrib.admin import (
AllValuesFieldListFilter, BooleanFieldListFilter, ModelAdmin,
RelatedOnlyFieldListFilter, SimpleListFilter, site,
)
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory, TestCase, override_settings
from .models import Book, Bookmark, Department, Employee, TaggedItem
def select_by(dictlist, key, value):
    """Return the first dict in *dictlist* whose *key* entry equals *value*.

    Raises IndexError when nothing matches (same list-indexing behaviour
    the tests below rely on).
    """
    matches = [entry for entry in dictlist if entry[key] == value]
    return matches[0]
class DecadeListFilter(SimpleListFilter):
    """Base fixture filter: bucket books by publication decade."""

    def lookups(self, request, model_admin):
        # (URL value, human readable label) pairs shown in the admin sidebar.
        return (
            ('the 80s', "the 1980's"),
            ('the 90s', "the 1990's"),
            ('the 00s', "the 2000's"),
            ('other', "other decades"),
        )

    def queryset(self, request, queryset):
        # Map each decade token to its inclusive year range.  Any other
        # selection (including 'other' or none) returns None, i.e. leaves
        # the queryset unchanged.
        year_ranges = {
            'the 80s': (1980, 1989),
            'the 90s': (1990, 1999),
            'the 00s': (2000, 2009),
        }
        selected = self.value()
        if selected in year_ranges:
            first, last = year_ranges[selected]
            return queryset.filter(year__gte=first, year__lte=last)
class NotNinetiesListFilter(SimpleListFilter):
    """Filter whose default (unselected) state already excludes the 1990s."""
    title = "Not nineties books"
    parameter_name = "book_year"

    def lookups(self, request, model_admin):
        return (
            ('the 90s', "the 1990's"),
        )

    def queryset(self, request, queryset):
        nineties = dict(year__gte=1990, year__lte=1999)
        if self.value() == 'the 90s':
            return queryset.filter(**nineties)
        return queryset.exclude(**nineties)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
    # Fully-configured variant: both SimpleListFilter attributes are set.
    title = 'publication decade'
    parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
    # NOTE(review): 'title' deliberately omitted — presumably used to
    # exercise misconfiguration handling; confirm against the tests.
    parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
    # NOTE(review): 'parameter_name' deliberately omitted — presumably used
    # to exercise misconfiguration handling; confirm against the tests.
    title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
    # lookups() implicitly returns None, i.e. the filter offers no choices.
    def lookups(self, request, model_admin):
        pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
    # Deliberately raises ZeroDivisionError (1 / 0 is evaluated before the
    # raise) so tests can observe how a failing queryset() is surfaced.
    def queryset(self, request, queryset):
        raise 1 / 0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):

    def lookups(self, request, model_admin):
        """Yield only the decades that actually contain visible books."""
        qs = model_admin.get_queryset(request)
        decades = (
            ('the 80s', "the 1980's", 1980, 1989),
            ('the 90s', "the 1990's", 1990, 1999),
            ('the 00s', "the 2000's", 2000, 2009),
        )
        for token, label, first, last in decades:
            if qs.filter(year__gte=first, year__lte=last).exists():
                yield (token, label)
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
    title = 'publication decade'
    # Parameter name deliberately ends with '__in', colliding with the
    # 'in' field-lookup suffix.
    parameter_name = 'decade__in'  # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
    title = 'publication decade'
    # Parameter name deliberately ends with '__isnull', colliding with the
    # 'isnull' field-lookup suffix.
    parameter_name = 'decade__isnull'  # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
    """Filter employees by department, using a non-string lookup value."""
    title = 'department'
    parameter_name = 'department'
    def lookups(self, request, model_admin):
        # Build unique (id, code) pairs from the employees currently visible
        # to this admin.
        return sorted({
            (employee.department.id, # Intentionally not a string (Refs #19318)
            employee.department.code)
            for employee in model_admin.get_queryset(request).all()
        })
    def queryset(self, request, queryset):
        # No selection -> return None, leaving the queryset unfiltered.
        if self.value():
            return queryset.filter(department__id=self.value())
class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue):
    # Parameter name contains '__' without being a recognized lookup suffix.
    parameter_name = 'department__whatever'
class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter):

    def lookups(self, request, model_admin):
        """Offer the decade(s) other than the currently selected one."""
        eighties = ('the 80s', "the 1980's")
        nineties = ('the 90s', "the 1990's")
        selected = self.value()
        if selected == 'the 80s':
            return (nineties,)
        if selected == 'the 90s':
            return (eighties,)
        return (eighties, nineties)
class CustomUserAdmin(UserAdmin):
    # Filter users through reverse relations to Book.
    list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
    # One filter per field kind exercised by the tests below; newest first.
    list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
    ordering = ('-id',)
class BookAdmin2(ModelAdmin):
    # Like BookAdmin, but filters on is_best_seller2 and sets no ordering.
    list_filter = ('year', 'author', 'contributors', 'is_best_seller2', 'date_registered', 'no')
class BookAdminWithTupleBooleanFilter(BookAdmin):
    # Same as BookAdmin, but is_best_seller uses the explicit
    # (field, FilterClass) tuple form.
    list_filter = (
        'year',
        'author',
        'contributors',
        ('is_best_seller', BooleanFieldListFilter),
        'date_registered',
        'no',
    )
class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
    # Filters on a related-field traversal ('author__email') via the
    # (lookup, FilterClass) tuple form.
    list_filter = (
        'year',
        ('author__email', AllValuesFieldListFilter),
        'contributors',
        'is_best_seller',
        'date_registered',
        'no',
    )
class BookAdminWithCustomQueryset(ModelAdmin):
    # Restricts the changelist to books authored by the user given at
    # construction time.
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super().__init__(*args, **kwargs)
    list_filter = ('year',)
    def get_queryset(self, request):
        return super().get_queryset(request).filter(author=self.user)
class BookAdminRelatedOnlyFilter(ModelAdmin):
    # Uses RelatedOnlyFieldListFilter for the relational fields, including
    # a related-field traversal ('employee__department').
    list_filter = (
        'year', 'is_best_seller', 'date_registered', 'no',
        ('author', RelatedOnlyFieldListFilter),
        ('contributors', RelatedOnlyFieldListFilter),
        ('employee__department', RelatedOnlyFieldListFilter),
    )
    ordering = ('-id',)
class DecadeFilterBookAdmin(ModelAdmin):
    # Combines a field filter with the custom SimpleListFilter subclass.
    list_filter = ('author', DecadeListFilterWithTitleAndParameter)
    ordering = ('-id',)
class NotNinetiesListFilterAdmin(ModelAdmin):
    # Admin using the filter whose default state excludes the 1990s.
    list_filter = (NotNinetiesListFilter,)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
    # Admin wired to the filter that lacks a 'title'.
    list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
    # Admin wired to the filter that lacks a 'parameter_name'.
    list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
    # Admin wired to the filter whose lookups() returns None.
    list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
    # Admin wired to the filter whose queryset() raises.
    list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
    # Admin wired to the filter whose lookups depend on the queryset.
    list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
    # Admin wired to the filter whose parameter ends in '__in'.
    list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
    # Admin wired to the filter whose parameter ends in '__isnull'.
    list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
    # Plain FK filter on department; base class for the variants below.
    list_display = ['name', 'department']
    list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
    # Uses the custom filter whose lookup values are non-string ids.
    list_filter = [DepartmentListFilterLookupWithNonStringValue]
class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
    # Uses the custom filter with a double-underscored parameter name.
    list_filter = [DepartmentListFilterLookupWithUnderscoredParameter]
class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
    # Uses the filter whose lookups change with the selected value.
    list_filter = [DepartmentListFilterLookupWithDynamicValue]
class BookmarkAdminGenericRelation(ModelAdmin):
    # Filters through a generic relation traversal ('tags__tag').
    list_filter = ['tags__tag']
class ListFiltersTests(TestCase):
request_factory = RequestFactory()
    @classmethod
    def setUpTestData(cls):
        """Create the shared users, books, departments, and employees, plus
        reference dates computed relative to today."""
        cls.today = datetime.date.today()
        cls.tomorrow = cls.today + datetime.timedelta(days=1)
        cls.one_week_ago = cls.today - datetime.timedelta(days=7)
        # First day of the following month (roll the year over in December).
        if cls.today.month == 12:
            cls.next_month = cls.today.replace(year=cls.today.year + 1, month=1, day=1)
        else:
            cls.next_month = cls.today.replace(month=cls.today.month + 1, day=1)
        cls.next_year = cls.today.replace(year=cls.today.year + 1, month=1, day=1)
        # Users
        cls.alfred = User.objects.create_superuser('alfred', 'alfred@example.com', 'password')
        cls.bob = User.objects.create_user('bob', 'bob@example.com')
        cls.lisa = User.objects.create_user('lisa', 'lisa@example.com')
        # Books
        cls.djangonaut_book = Book.objects.create(
            title='Djangonaut: an art of living', year=2009,
            author=cls.alfred, is_best_seller=True, date_registered=cls.today,
            is_best_seller2=True,
        )
        cls.bio_book = Book.objects.create(
            title='Django: a biography', year=1999, author=cls.alfred,
            is_best_seller=False, no=207,
            is_best_seller2=False,
        )
        cls.django_book = Book.objects.create(
            title='The Django Book', year=None, author=cls.bob,
            is_best_seller=None, date_registered=cls.today, no=103,
            is_best_seller2=None,
        )
        # guitar_book has no author, only contributors.
        cls.guitar_book = Book.objects.create(
            title='Guitar for dummies', year=2002, is_best_seller=True,
            date_registered=cls.one_week_ago,
            is_best_seller2=True,
        )
        cls.guitar_book.contributors.set([cls.bob, cls.lisa])
        # Departments
        cls.dev = Department.objects.create(code='DEV', description='Development')
        cls.design = Department.objects.create(code='DSN', description='Design')
        # Employees
        cls.john = Employee.objects.create(name='John Blue', department=cls.dev)
        cls.jack = Employee.objects.create(name='Jack Red', department=cls.design)
    def test_choicesfieldlistfilter_has_none_choice(self):
        """
        The last choice is for the None value.
        """
        class BookmarkChoicesAdmin(ModelAdmin):
            list_display = ['none_or_null']
            list_filter = ['none_or_null']
        modeladmin = BookmarkChoicesAdmin(Bookmark, site)
        request = self.request_factory.get('/', {})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        choices = list(filterspec.choices(changelist))
        # The final choice selects rows where the field is NULL.
        self.assertEqual(choices[-1]['display'], 'None')
        self.assertEqual(choices[-1]['query_string'], '?none_or_null__isnull=True')
    def test_datefieldlistfilter(self):
        """Each DateFieldListFilter preset (Today / This month / This year /
        Past 7 days / No date / Has date) filters the queryset, marks the
        matching choice as selected, and emits the expected query string."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist(request)
        # --- "Today" ---
        request = self.request_factory.get('/', {
            'date_registered__gte': self.today,
            'date_registered__lt': self.tomorrow},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Today")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today,
                self.tomorrow,
            )
        )
        # --- "This month" ---
        request = self.request_factory.get('/', {
            'date_registered__gte': self.today.replace(day=1),
            'date_registered__lt': self.next_month},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
            # In case one week ago is in the same month.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This month")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(day=1),
                self.next_month,
            )
        )
        # --- "This year" ---
        request = self.request_factory.get('/', {
            'date_registered__gte': self.today.replace(month=1, day=1),
            'date_registered__lt': self.next_year},
        )
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if self.today.year == self.one_week_ago.year:
            # In case one week ago is in the same year.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This year")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(month=1, day=1),
                self.next_year,
            )
        )
        # --- "Past 7 days" ---
        request = self.request_factory.get('/', {
            'date_registered__gte': str(self.one_week_ago),
            'date_registered__lt': str(self.tomorrow),
        })
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                str(self.one_week_ago),
                str(self.tomorrow),
            )
        )
        # Null/not null queries
        request = self.request_factory.get('/', {'date_registered__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 1)
        self.assertEqual(queryset[0], self.bio_book)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'No date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=True')
        request = self.request_factory.get('/', {'date_registered__isnull': 'False'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 3)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'Has date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=False')
    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows doesn't support setting a timezone that differs from the "
        "system timezone."
    )
    @override_settings(USE_TZ=True)
    def test_datefieldlistfilter_with_time_zone_support(self):
        """The date filter behaves identically when USE_TZ is enabled."""
        # Regression for #17830
        self.test_datefieldlistfilter()
    def test_allvaluesfieldlistfilter(self):
        """AllValuesFieldListFilter handles both the isnull choice and a
        concrete value selection on the 'year' field."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/', {'year__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
        request = self.request_factory.get('/', {'year': '2002'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?year=2002')
    def test_allvaluesfieldlistfilter_custom_qs(self):
        # Make sure that correct filters are returned with custom querysets
        modeladmin = BookAdminWithCustomQueryset(self.alfred, Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        choices = list(filterspec.choices(changelist))
        # Should have 'All', 1999 and 2009 options i.e. the subset of years of
        # books written by alfred (which is the filtering criteria set by
        # BookAdminWithCustomQueryset.get_queryset())
        self.assertEqual(3, len(choices))
        self.assertEqual(choices[0]['query_string'], '?')
        self.assertEqual(choices[1]['query_string'], '?year=1999')
        self.assertEqual(choices[2]['query_string'], '?year=2009')
    def test_relatedfieldlistfilter_foreignkey(self):
        """RelatedFieldListFilter on a FK: lists all users, supports the
        isnull choice, and selects the right choice for an exact pk."""
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure that all users are present in the author's list filter
        filterspec = changelist.get_filters(request)[0][1]
        expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
        self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
        request = self.request_factory.get('/', {'author__isnull': 'True'})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'Verbose Author')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
        request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][1]
        self.assertEqual(filterspec.title, 'Verbose Author')
        # order of choices depends on User model, which has no order
        choice = select_by(filterspec.choices(changelist), "display", "alfred")
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
    def test_relatedfieldlistfilter_foreignkey_ordering(self):
        """RelatedFieldListFilter ordering respects ModelAdmin.ordering."""
        class EmployeeAdminWithOrdering(ModelAdmin):
            ordering = ('name',)
        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)
        # Register the related model's admin so its ordering is picked up;
        # unregister again when the test finishes.
        site.register(Employee, EmployeeAdminWithOrdering)
        self.addCleanup(lambda: site.unregister(Employee))
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.jack.pk, 'Jack Red'), (self.john.pk, 'John Blue')]
        self.assertEqual(filterspec.lookup_choices, expected)
    def test_relatedfieldlistfilter_foreignkey_ordering_reverse(self):
        """Descending ModelAdmin.ordering is reflected in the filter choices."""
        class EmployeeAdminWithOrdering(ModelAdmin):
            ordering = ('-name',)
        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)
        site.register(Employee, EmployeeAdminWithOrdering)
        self.addCleanup(lambda: site.unregister(Employee))
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.john.pk, 'John Blue'), (self.jack.pk, 'Jack Red')]
        self.assertEqual(filterspec.lookup_choices, expected)
    def test_relatedfieldlistfilter_foreignkey_default_ordering(self):
        """RelatedFieldListFilter ordering respects Model.ordering."""
        class BookAdmin(ModelAdmin):
            list_filter = ('employee',)
        # Temporarily patch Meta.ordering; addCleanup restores the original.
        self.addCleanup(setattr, Employee._meta, 'ordering', Employee._meta.ordering)
        Employee._meta.ordering = ('name',)
        modeladmin = BookAdmin(Book, site)
        request = self.request_factory.get('/')
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        expected = [(self.jack.pk, 'Jack Red'), (self.john.pk, 'John Blue')]
        self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedfieldlistfilter_manytomany(self):
    """
    RelatedFieldListFilter on a ManyToManyField lists every related user,
    supports the isnull choice, and selects the right choice for an
    id__exact lookup.
    """
    modeladmin = BookAdmin(Book, site)

    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure that all users are present in the contrib's list filter
    filterspec = changelist.get_filters(request)[0][2]
    expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    request = self.request_factory.get('/', {'contributors__isnull': 'True'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])

    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][2]
    self.assertEqual(filterspec.title, 'Verbose Contributors')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')

    request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][2]
    self.assertEqual(filterspec.title, 'Verbose Contributors')
    choice = select_by(filterspec.choices(changelist), "display", "bob")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
    """
    Reverse FK and reverse M2M relations can be used in list_filter:
    isnull and id__exact lookups work, and the filter disappears once no
    related objects remain.
    """
    modeladmin = CustomUserAdmin(User, site)

    # FK relationship -----
    request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.lisa])

    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'book')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')

    request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'book')
    choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)

    # M2M relationship -----
    request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.alfred])

    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'book')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')

    request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'book')
    choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)

    # With one book, the list filter should appear because there is also a
    # (None) option.
    Book.objects.exclude(pk=self.djangonaut_book.pk).delete()
    filterspec = changelist.get_filters(request)[0]
    self.assertEqual(len(filterspec), 2)

    # With no books remaining, no list filters should appear.
    Book.objects.all().delete()
    filterspec = changelist.get_filters(request)[0]
    self.assertEqual(len(filterspec), 0)
def test_relatedfieldlistfilter_reverse_relationships_default_ordering(self):
    """Reverse-relationship filters respect the related model's Meta.ordering."""
    # Temporarily order books by title; addCleanup restores the original.
    self.addCleanup(setattr, Book._meta, 'ordering', Book._meta.ordering)
    Book._meta.ordering = ('title',)
    modeladmin = CustomUserAdmin(User, site)

    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)
    filterspec = changelist.get_filters(request)[0][0]
    # Alphabetical by title, as dictated by Book._meta.ordering.
    expected = [
        (self.bio_book.pk, 'Django: a biography'),
        (self.djangonaut_book.pk, 'Djangonaut: an art of living'),
        (self.guitar_book.pk, 'Guitar for dummies'),
        (self.django_book.pk, 'The Django Book')
    ]
    self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_foreignkey(self):
    """RelatedOnlyFieldListFilter only offers authors actually used by books."""
    admin_obj = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    request.user = self.alfred
    cl = admin_obj.get_changelist_instance(request)
    # The author filter (index 4) must omit users without any book.
    author_filter = cl.get_filters(request)[0][4]
    used_authors = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob')]
    self.assertEqual(sorted(author_filter.lookup_choices), sorted(used_authors))
def test_relatedonlyfieldlistfilter_foreignkey_default_ordering(self):
    """RelatedOnlyFieldListFilter ordering respects Meta.ordering."""
    class BookAdmin(ModelAdmin):
        list_filter = (
            ('employee', RelatedOnlyFieldListFilter),
        )

    # Attach employees to two books so the "related only" filter has data.
    albert = Employee.objects.create(name='Albert Green', department=self.dev)
    self.djangonaut_book.employee = albert
    self.djangonaut_book.save()
    self.bio_book.employee = self.jack
    self.bio_book.save()

    # Temporarily order employees by name; restored on cleanup.
    self.addCleanup(setattr, Employee._meta, 'ordering', Employee._meta.ordering)
    Employee._meta.ordering = ('name',)
    modeladmin = BookAdmin(Book, site)

    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)
    filterspec = changelist.get_filters(request)[0][0]
    # Only the two employees that actually have books, sorted by name.
    expected = [(albert.pk, 'Albert Green'), (self.jack.pk, 'Jack Red')]
    self.assertEqual(filterspec.lookup_choices, expected)
def test_relatedonlyfieldlistfilter_underscorelookup_foreignkey(self):
    """
    RelatedOnlyFieldListFilter works with a double-underscore path
    ('employee__department') and excludes unused related rows.
    """
    # A department with no employees attached to any book; it must not
    # appear among the filter's choices.
    Department.objects.create(code='TEST', description='Testing')
    self.djangonaut_book.employee = self.john
    self.djangonaut_book.save()
    self.bio_book.employee = self.jack
    self.bio_book.save()

    modeladmin = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Only actual departments should be present in employee__department's
    # list filter.
    filterspec = changelist.get_filters(request)[0][6]
    expected = [
        (self.dev.code, str(self.dev)),
        (self.design.code, str(self.design)),
    ]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_manytomany(self):
    """RelatedOnlyFieldListFilter on an M2M lists only actual contributors."""
    admin_obj = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    request.user = self.alfred
    cl = admin_obj.get_changelist_instance(request)
    # The contributors filter (index 5) must omit users who contributed nothing.
    contrib_filter = cl.get_filters(request)[0][5]
    actual_contribs = [(self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
    self.assertEqual(sorted(contrib_filter.lookup_choices), sorted(actual_contribs))
def test_listfilter_genericrelation(self):
    """A GenericRelation path ('tags__tag') in list_filter filters the queryset."""
    django_bookmark = Bookmark.objects.create(url='https://www.djangoproject.com/')
    python_bookmark = Bookmark.objects.create(url='https://www.python.org/')
    kernel_bookmark = Bookmark.objects.create(url='https://www.kernel.org/')

    # Two bookmarks tagged 'python', one tagged 'linux'.
    TaggedItem.objects.create(content_object=django_bookmark, tag='python')
    TaggedItem.objects.create(content_object=python_bookmark, tag='python')
    TaggedItem.objects.create(content_object=kernel_bookmark, tag='linux')

    modeladmin = BookmarkAdminGenericRelation(Bookmark, site)

    request = self.request_factory.get('/', {'tags__tag': 'python'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)
    queryset = changelist.get_queryset(request)

    # Only the python-tagged bookmarks survive the filter.
    expected = [python_bookmark, django_bookmark]
    self.assertEqual(list(queryset), expected)
def test_booleanfieldlistfilter(self):
    """BooleanFieldListFilter works when list_filter names the field directly."""
    self.verify_booleanfieldlistfilter(BookAdmin(Book, site))
def test_booleanfieldlistfilter_tuple(self):
    """BooleanFieldListFilter works via the ('field', FilterClass) tuple form."""
    self.verify_booleanfieldlistfilter(BookAdminWithTupleBooleanFilter(Book, site))
def verify_booleanfieldlistfilter(self, modeladmin):
    """
    Shared assertions for BooleanFieldListFilter on Book.is_best_seller:
    exercises the No (exact=0), Yes (exact=1) and Unknown (isnull) choices,
    checking both the filtered queryset and the selected choice each time.
    """
    # Plain request first — presumably verifies the changelist builds
    # without parameters before filtering (TODO confirm intent).
    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    request = self.request_factory.get('/', {'is_best_seller__exact': 0})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "No")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')

    request = self.request_factory.get('/', {'is_best_seller__exact': 1})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "Yes")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')

    request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.django_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "Unknown")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_booleanfieldlistfilter_nullbooleanfield(self):
    """
    BooleanFieldListFilter on a NullBooleanField (is_best_seller2) behaves
    like on a regular BooleanField: No, Yes and Unknown choices all work.
    """
    modeladmin = BookAdmin2(Book, site)

    # Plain request first — presumably verifies the changelist builds
    # without parameters before filtering (TODO confirm intent).
    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    request = self.request_factory.get('/', {'is_best_seller2__exact': 0})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller2')
    choice = select_by(filterspec.choices(changelist), "display", "No")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller2__exact=0')

    request = self.request_factory.get('/', {'is_best_seller2__exact': 1})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller2')
    choice = select_by(filterspec.choices(changelist), "display", "Yes")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller2__exact=1')

    request = self.request_factory.get('/', {'is_best_seller2__isnull': 'True'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.django_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller2')
    choice = select_by(filterspec.choices(changelist), "display", "Unknown")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller2__isnull=True')
def test_fieldlistfilter_underscorelookup_tuple(self):
    """
    Ensure ('fieldpath', ClassName ) lookups pass lookup_allowed checks
    when fieldpath contains double underscore in value (#19182).
    """
    modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)

    # The unfiltered changelist must build without raising.
    request = self.request_factory.get('/')
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    request = self.request_factory.get('/', {'author__email': 'alfred@example.com'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
def test_fieldlistfilter_invalid_lookup_parameters(self):
    """Filtering by an invalid value raises IncorrectLookupParameters."""
    request = self.request_factory.get('/', {'author__id__exact': 'StringNotInteger!'})
    request.user = self.alfred
    admin_obj = BookAdmin(Book, site)
    with self.assertRaises(IncorrectLookupParameters):
        admin_obj.get_changelist_instance(request)
def test_simplelistfilter(self):
    """
    SimpleListFilter: the 'All' choice is selected by default, each decade
    value filters the queryset and marks its choice as selected, and the
    filter composes with a regular field filter in one query string.
    """
    modeladmin = DecadeFilterBookAdmin(Book, site)

    # Make sure that the first option is 'All' ---------------------------
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    # Look for books in the 1980s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'the 1980\'s')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')

    # Look for books in the 1990s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')

    # Look for books in the 2000s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[3]['display'], 'the 2000\'s')
    self.assertIs(choices[3]['selected'], True)
    self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')

    # Combine multiple filters -------------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.djangonaut_book])

    # Make sure the correct choices are selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[3]['display'], 'the 2000\'s')
    self.assertIs(choices[3]['selected'], True)
    # Each filter's query string must preserve the other filter's parameter.
    self.assertEqual(
        choices[3]['query_string'],
        '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk
    )

    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'Verbose Author')
    choice = select_by(filterspec.choices(changelist), "display", "alfred")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
def test_listfilter_without_title(self):
    """
    Any filter must define a title.
    """
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    admin_obj = DecadeFilterBookAdminWithoutTitle(Book, site)
    expected_msg = "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'."
    with self.assertRaisesMessage(ImproperlyConfigured, expected_msg):
        admin_obj.get_changelist_instance(request)
def test_simplelistfilter_without_parameter(self):
    """
    Any SimpleListFilter must define a parameter_name.
    """
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    admin_obj = DecadeFilterBookAdminWithoutParameter(Book, site)
    expected_msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
    with self.assertRaisesMessage(ImproperlyConfigured, expected_msg):
        admin_obj.get_changelist_instance(request)
def test_simplelistfilter_with_none_returning_lookups(self):
    """
    A SimpleListFilter lookups method can return None but disables the
    filter completely.
    """
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    admin_obj = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
    changelist = admin_obj.get_changelist_instance(request)
    # No filters should be active at all.
    self.assertEqual(len(changelist.get_filters(request)[0]), 0)
def test_filter_with_failing_queryset(self):
    """
    When a filter's queryset method fails, it fails loudly and
    the corresponding exception doesn't get swallowed (#17828).
    """
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    failing_admin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
    with self.assertRaises(ZeroDivisionError):
        failing_admin.get_changelist_instance(request)
def test_simplelistfilter_with_queryset_based_lookups(self):
    """
    A SimpleListFilter whose lookups() is derived from the queryset only
    offers decades that actually contain books.
    """
    modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    # 'All' plus the two decades present in the fixtures; no 1980s entry.
    self.assertEqual(len(choices), 3)

    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'the 1990\'s')
    self.assertIs(choices[1]['selected'], False)
    self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')

    self.assertEqual(choices[2]['display'], 'the 2000\'s')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
    """
    list_filter works with two-characters long field names (#16080).
    """
    modeladmin = BookAdmin(Book, site)
    request = self.request_factory.get('/', {'no': '207'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # The 'no' field filter is the last one; choice index 2 corresponds to
    # the value 207.
    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'number')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
    """
    A SimpleListFilter's parameter name is not mistaken for a model field
    if it ends with '__isnull' or '__in' (#17091).
    """
    # When it ends with '__in' -----------------------------------------
    modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
    request = self.request_factory.get('/', {'decade__in': 'the 90s'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')

    # When it ends with '__isnull' ---------------------------------------
    modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
    request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
    """
    Ensure choices are set the selected class when using non-string values
    for lookups in SimpleListFilters (#19318).
    """
    modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
    # The lookup value is an integer pk, not a string.
    request = self.request_factory.get('/', {'department': self.john.department.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'DEV')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.department.pk)
def test_lookup_with_non_string_value_underscored(self):
    """
    Ensure SimpleListFilter lookups pass lookup_allowed checks when
    parameter_name attribute contains double-underscore value (#19182).
    """
    modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
    # parameter_name here is 'department__whatever', which must not be
    # rejected as a disallowed field lookup.
    request = self.request_factory.get('/', {'department__whatever': self.john.department.pk})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'DEV')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department__whatever=%s' % self.john.department.pk)
def test_fk_with_to_field(self):
    """
    A filter on a FK respects the FK's to_field attribute (#17972).
    """
    modeladmin = EmployeeAdmin(Employee, site)

    request = self.request_factory.get('/', {})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.jack, self.john])

    # Lookups use department__code (the to_field), not the pk.
    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))

    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'Development')
    self.assertIs(choices[1]['selected'], False)
    self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

    self.assertEqual(choices[2]['display'], 'Design')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')

    # Filter by Department=='Development' --------------------------------
    request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))

    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], False)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'Development')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

    self.assertEqual(choices[2]['display'], 'Design')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
def test_lookup_with_dynamic_value(self):
    """
    Ensure SimpleListFilter can access self.value() inside the lookup.
    """
    modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)

    # Helper: build the changelist for `request` and compare the decade
    # filter's choice labels against `expected_displays`.
    def _test_choices(request, expected_displays):
        request.user = self.alfred
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = tuple(c['display'] for c in filterspec.choices(changelist))
        self.assertEqual(choices, expected_displays)

    # The currently-selected decade is excluded from the offered lookups.
    _test_choices(self.request_factory.get('/', {}),
                  ("All", "the 1980's", "the 1990's"))

    _test_choices(self.request_factory.get('/', {'publication-decade': 'the 80s'}),
                  ("All", "the 1990's"))

    _test_choices(self.request_factory.get('/', {'publication-decade': 'the 90s'}),
                  ("All", "the 1980's"))
def test_list_filter_queryset_filtered_by_default(self):
    """
    A list filter that filters the queryset by default gives the correct
    full_result_count.
    """
    modeladmin = NotNinetiesListFilterAdmin(Book, site)
    request = self.request_factory.get('/', {})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)
    changelist.get_results(request)
    # full_result_count must reflect all books, not just the default-filtered set.
    self.assertEqual(changelist.full_result_count, 4)
| {
"content_hash": "a04383e763a4c11f516b66c34e568b9a",
"timestamp": "",
"source": "github",
"line_count": 1312,
"max_line_length": 119,
"avg_line_length": 43.019817073170735,
"alnum_prop": 0.6523510860706566,
"repo_name": "pexip/os-python-django",
"id": "75563bbaaf790af40d6e494a8d44f90be8b5d6bd",
"size": "56442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/admin_filters/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85025"
},
{
"name": "HTML",
"bytes": "224693"
},
{
"name": "JavaScript",
"bytes": "257297"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "13116332"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Starts testengine using the given modelpackage.
Usage: tema.runmodelpackage MODELPACK.zip|MODELPACKDIR [OPTIONS]
The modelpackage is extracted, configured, composed, and ran with testengine.
Run runmodelpackage.py -h for help.
"""
import sys, os, optparse, zipfile, urllib, re, shutil
from tema.packagereader.packagereader import Reader
import tema.data.datareader as datareader
import tema.modelutils.generatetestconf as generatetestconf
import tema.modelutils.composemodel as composemodel
class RunModelPackageFailed(Exception):
    """Raised when handling a model package (extract/configure/compose/run) fails."""
def parseArgs(cmdArgs):
    """Parse command line arguments.

    Returns (modelpackage, options) where modelpackage is the positional
    zip/directory argument and options is the optparse Values object with
    --devices / --applications already normalised into dicts.
    """
    # Check if we have guiguidance and guiadapter available
    try:
        import tema.guidance.guiguidance as guiguidance
        guidancedefault="guiguidance"
    except ImportError,e:
        guidancedefault="randomguidance"
    try:
        import tema.adapter.guiadapter as guiadapter
        adapterdefault="guiadapter"
    except ImportError,e:
        adapterdefault="socketserveradapter"
    usage="runmodelpackage.py MODELPACK.zip|MODELPACKDIR [OPTIONS]\n\n\
MODELPACK.zip is something you can export from ModelDesigner\n\
It will be extracted to MODELPACK directory.\n\n\
Eg. runmodelpackage.py modelpackage.zip\n\
    --devices=\"MyDevice\"\n\
    --runonport=9090\n\
    --guidance=randomguidance\n\n\
By default: using guiguidance, guiadapter (if available)\n\
and all the devices found in the package.\n\n\
-h for help."
    op = optparse.OptionParser(usage=usage)
    op.add_option('--runonport', metavar='PORT',
        help="Shortcut to start socketserveradapter on the given port. "+
        "Overrides --adapter and --adapter-args.")
    op.add_option('--devices',
        help="The name(s) of the device(s) to use in the test run. "+
        "Space-separated. Locale can be given with separator ':' "+
        "Default: all the devices in the package. " +
        "Example: 'Ian:en Emulator'")
    op.add_option('--products',
        help="The name(s) of the product(s) to use in the test run. "+
        "Space-separated. Default: all the products in the package")
    op.add_option('--applications', default="",
        help="The name(s) of the application(s) to use in the test run. "+
        "Default: all the applications in the package for all devices. "+
        "Example: 'Frank:Contacts,Messaging;Emulator:Gallery,BBC News'")
    op.add_option('--exclude', metavar='FILES', default=(),
        help="List of files to exclude from target and testrun.")
    op.add_option('--nodevice', action='store_true',
        help="Runs the modelpackage on a testadapter instead of a "+
        "(real or simulated) device. Overrides --adapter.")
    op.add_option('--nomake', action='store_true',
        help="Don't create a new target. Use the existing one.")
    op.add_option('--notestconf', action='store_true',
        help="Don't run generate-testconf. Only unpack model package and generate testconfiguration.conf file.")
    op.add_option('--norun', action='store_true',
        help="Don't run the package. Just create a runnable target.")
    op.add_option('--deviceperproduct', action='store_true',
        help="Use only one device per product")
    op.add_option('--targetdir',
        help="Device where runnable model will be generated")
    tegroup = optparse.OptionGroup(op, "arguments that are passed through to test engine")
    # argument, default value
    testengineArgs = ( ('--adapter',adapterdefault),
                       ('--adapter-args',''),
                       ('--guidance',guidancedefault),
                       ('--guidance-args',''),
                       ('--coverage',''),
                       ('--coveragereq',''),
                       ('--coveragereq-args',''),
                       ('--initmodels',''),
                       ('--config',''),
                       ('--testdata',''),
                       ('--actionpp',''),
                       ('--actionpp-args',''),
                       ('--stop-after',''),
                       ('--verify-states','0'),
                       ('--logger','fdlogger'),
                       ('--logger-args', 'targetfd:stdout') )
    for a,d in testengineArgs:
        tegroup.add_option(a,default=d,metavar="...",
                           help="testengine argument %s" %(a,),)
    op.add_option_group(tegroup)
    op.add_option('--adapter-args-model', action='store_true', default=False,
                  help="adds the created model to adapter-args")
    options,args = op.parse_args(cmdArgs)
    # Shortcut options rewrite the pass-through testengine options.
    if options.runonport:
        options.adapter='socketserveradapter'
        options.adapter_args='port:%s' % options.runonport
    if options.nodevice:
        options.adapter='testadapter'
        options.testdata='nodata'
        options.adapter_args_model=True
    # Excludes become anchored compiled regexes for isExcluded().
    if options.exclude:
        options.exclude=[re.compile(e+'$') for e in options.exclude.split(' ')]
    if options.devices and options.deviceperproduct:
        op.error("Options 'devices' and 'deviceperproduct' are mutually exclusive")
    # Device2: App1,App2;Device2: App2,App3 ->
    # {Device1: [App1,App2], Device2: [App2,App3]}
    apps_dict = dict([[y[0],y[1].split(",")] for y in [x.split(":",1) for x in options.applications.split(";")] if len(y) == 2])
    options.applications = apps_dict
    # --devices becomes {devicename: locale-or-empty-string}.
    if options.devices:
        devices_list = [ x.split(":",1) for x in options.devices.split(" ")]
        options.devices = {}
        for d in devices_list:
            if len(d) == 1 and len(d[0].strip()) == 0:
                continue
            elif len(d) == 1:
                options.devices[d[0]] = ''
            else:
                options.devices[d[0]] = d[1]
    else:
        options.devices = {}
    # --notestconf implies --norun: nothing runnable is produced.
    if options.notestconf:
        options.norun = True
    if len(args) < 1:
        op.error("No modelpackage given.")
    elif len(args) > 1:
        op.error("Too many arguments.")
    if not os.path.exists(args[0]):
        op.error("File %s not found." %(args[0],))
    return args[0], options
def executableInPath(exe):
    """Return True if *exe* names an executable file on some PATH entry.

    Fix: the original fell off the end and returned None on a miss; callers
    only test truthiness, so returning an explicit False is backward
    compatible and clearer.
    """
    for path in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(path, exe)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return True
    return False
def isExcluded(filename, exclude):
    """Return True when *filename* matches any compiled pattern in *exclude*."""
    return any(pattern.match(filename) for pattern in exclude)
def listOfIncluded(filenames, options):
    """Drop every name that matches one of the options.exclude patterns."""
    kept = []
    for name in filenames:
        if not isExcluded(name, options.exclude):
            kept.append(name)
    return kept
def filesInDir(dirName, fileEnding=None):
    """List the entries of *dirName*, keeping only names ending in *fileEnding* when given."""
    entries = os.listdir(dirName)
    if fileEnding is None:
        return entries
    return [entry for entry in entries if entry.endswith(fileEnding)]
def getMddFile(modelDir):
    """Return the name of the single .mdd file in *modelDir*.

    Raises RunModelPackageFailed when there is not exactly one .mdd file.
    """
    candidates = filesInDir(modelDir, '.mdd')
    if len(candidates) == 1:
        return candidates[0]
    raise RunModelPackageFailed(
        "There should be only one .mdd file. Now there are %i."
        % (len(candidates),) )
def getMddFileWithPath(modelDir):
    """Full path of the package's single .mdd file."""
    mdd_name = getMddFile(modelDir)
    return os.path.join(modelDir, mdd_name)
def excludeDevices(dirName,tdFiles,options):
    """Pick at most one .td file per logical device name.

    The logical name is read from the first line of each .td file. When two
    files share a logical name, a file whose basename was explicitly listed
    in --devices wins over an earlier, unlisted one.

    Fix: replaced the manual open/readline/close-in-finally dance with a
    ``with`` statement so the handle is always closed, even on read errors.
    """
    if options.devices:
        allowedDevices = options.devices.keys()
    else:
        allowedDevices = []
    chosen = {}
    for tdFile in tdFiles:
        with open(os.path.join(dirName, tdFile), 'r') as handle:
            contents = handle.readline()
        l = datareader.getDataInfo(contents)[0]
        # Keep the already-chosen file unless this one is explicitly allowed
        # (tdFile[:-3] strips the '.td' suffix).
        if l in chosen and tdFile[:-3] not in allowedDevices:
            continue
        chosen[l] = tdFile
    return chosen.values()
def getTdFiles(dirName, options):
    """Names of the .td files to use: not excluded, one per logical device."""
    included = listOfIncluded(filesInDir(dirName, '.td'), options)
    return excludeDevices(dirName, included, options)
def getTdFilesWithPath(dirName, options):
    """Full paths of the selected .td files."""
    return [os.path.join(dirName, name) for name in getTdFiles(dirName, options)]
def getLstsFiles(dirName, options):
    """Names of the non-excluded .lsts files in *dirName*."""
    all_lsts = filesInDir(dirName, '.lsts')
    return listOfIncluded(all_lsts, options)
def getLstsFilesWithPath(dirName, options):
    """Full paths of the non-excluded .lsts files."""
    return [os.path.join(dirName, name) for name in getLstsFiles(dirName, options)]
def getCsvFiles(dirName, options, onlyTargets=False):
    """Names of the .csv files to use.

    By default scans *dirName* for non-excluded .csv files. With
    ``onlyTargets=True`` it instead derives one csv name per product of the
    selected target devices (order unspecified, as in the set-based original).
    """
    if onlyTargets:
        products = set(product for _, _, product in getTargetDevices(dirName, options))
        return ["%s.csv" % product for product in products]
    return listOfIncluded(filesInDir(dirName, '.csv'), options)
def getCsvFilesWithPath(dirName, options):
    """Full paths of the selected .csv files."""
    return [os.path.join(dirName, name) for name in getCsvFiles(dirName, options)]
def getActionppArgs(modelDir,targetDir,options):
    """Build the action post-processor argument string.

    For every selected target device, emits ``file:<td>:<product>.csv,`` and a
    ``lang:<td>:<locale>,`` entry; the locale comes from --devices when given
    (validated against the product's locales), otherwise the product's first
    locale. An explicit --actionpp-args wins unchanged.

    Fix: the original opened the .td file inside a try/finally that read
    ``handle`` before it was ever bound — if the first ``open()`` raised, the
    finally clause itself crashed with a NameError. A ``with`` statement
    closes the file safely in all cases.
    """
    if options.actionpp_args:
        return options.actionpp_args
    mddFile = getMddFileWithPath(modelDir)
    reader = Reader(mddFile)
    targets = getTargetDevices(modelDir,options)
    actionpp_args = ""
    tdFiles = getTdFilesWithPath(targetDir,options)
    for d,l,p in targets:
        devices = reader.getValue('devices',[p]).split('\n')
        for tdFile in tdFiles:
            for device in devices:
                if tdFile.endswith(urllib.quote(device) + ".td"):
                    with open(tdFile,'r') as handle:
                        contents = handle.readline()
                    log_cmp = datareader.getDataInfo(contents)[0]
                    # Only emit entries for the .td file whose logical name
                    # matches this target.
                    if l == log_cmp:
                        locales = reader.getValue('locales',[p]).split('\n')
                        actionpp_args += "file:%s:%s.csv," % (tdFile,os.path.join(targetDir,p))
                        d_quot = urllib.quote(d)
                        if d_quot in options.devices and options.devices[d_quot] not in locales and options.devices[d_quot].strip() != '':
                            raise RunModelPackageFailed(
                                "Unknown locale '%s' for device '%s'" % (options.devices[d_quot],d))
                        elif d_quot in options.devices and options.devices[d_quot].strip() != '':
                            actionpp_args += "lang:%s:%s," % (tdFile,options.devices[d_quot])
                        else:
                            actionpp_args += "lang:%s:%s," % (tdFile,locales[0])
    return actionpp_args
def getRulesFileWithPath(targetDir):
    """Return the path of the composed rules file inside *targetDir*."""
    rules_name = 'combined-rules.ext'
    return os.path.join(targetDir, rules_name)
def getLstsFilesOfDevice(device,product,modelDir,options):
    """Return the .lsts files belonging to *device*'s concurrent units.

    Concurrent units come from the product's 'concurrentunits' entry in the
    .mdd file, optionally narrowed by --applications; each selected unit is
    validated against the package contents.
    """
    mddFile = getMddFileWithPath(modelDir)
    reader = Reader(mddFile)
    concunits = reader.getValue('concurrentunits',[product]).split('\n')
    if device in options.applications:
        for selectedCu in options.applications[device]:
            if selectedCu.strip() not in concunits:
                raise RunModelPackageFailed(
                    "Error: no application named '%s' in product '%s' in device '%s'." % (selectedCu,product,device))
        concunits = options.applications[device]
    lstsFiles = getLstsFiles(modelDir,options)
    productLstsFiles = []
    # Model files are URL-quoted on disk, so spaces become %20 in the prefix.
    for cu in concunits:
        cu = cu.strip()
        for lf in lstsFiles:
            if lf.startswith(cu.replace(' ','%20')):
                productLstsFiles.append(lf)
    return productLstsFiles
def getAllTargetDevices(mddFile):
    """Returns a list of (devicename,productname) tuples."""
    reader = Reader(mddFile)
    targets = []
    for product in reader.getValue('products').split('\n'):
        for device in reader.getValue('devices', [product]).split('\n'):
            if device:
                targets.append((device, product))
    return targets
def targetDevicesAllowedByOptions(modelDir,allTargets,options):
    """Filter (device, product) pairs by --devices/--products/--exclude.

    Returns a list of (devicename, logicalname, productname) tuples, keeping
    only the first device seen for each logical name, and (with
    --deviceperproduct) the first device per product.
    """
    logicalnames = []
    selectedProducts = []
    if options.devices:
        allowedDevices = options.devices.keys()
    else:
        allowedDevices = None
    if options.products:
        allowedProducts = options.products.split()
    else:
        allowedProducts = None
    chosen = []
    for d,p in allTargets:
        # Option values were given URL-quoted; compare in quoted form.
        if allowedDevices and urllib.quote(d) not in allowedDevices:
            continue
        if allowedProducts and urllib.quote(p) not in allowedProducts:
            continue
        if isExcluded(d,options.exclude):
            continue
        # Options deviceperproduct and devices are mutually exclusive so we
        # don't need to check for that
        if options.deviceperproduct and p in selectedProducts:
            continue
        handle = None
        try:
            # The logical device name is on the first line of the .td file.
            handle = open(os.path.join(modelDir,urllib.quote(d)) + ".td")
            contents = handle.readline()
        finally:
            if handle:
                handle.close()
        l = datareader.getDataInfo(contents)[0]
        if l in logicalnames:
            continue
        chosen.append( (d,l,p) )
        logicalnames.append(l)
        selectedProducts.append(p)
    return chosen
def unzip(basedir,ziparchive):
    """Extract *ziparchive* under *basedir*, creating directories as needed.

    Returns True on success, False on OS/IO errors. Entries ending in '/'
    are treated as directories.

    NOTE(review): entry names are joined to basedir without sanitising
    '..' components, so a crafted archive could write outside basedir —
    acceptable only because packages come from the trusted ModelDesigner
    export; confirm before reusing elsewhere.
    """
    def check_dir(directory):
        if not os.path.isdir(directory):
            os.makedirs(directory)
    zip = None
    try:
        try:
            zip = zipfile.ZipFile(ziparchive,'r')
            for item in zip.namelist():
                if not item.endswith('/'):
                    root,name = os.path.split(item)
                    directory = os.path.normpath(os.path.join(basedir,root))
                    check_dir(directory)
                    extract_item = open(os.path.join(directory, name), 'wb')
                    extract_item.write(zip.read(item))
                    extract_item.close()
                else:
                    directory = os.path.normpath(os.path.join(basedir,item))
                    check_dir(directory)
        except OSError,e:
            return False
        except IOError,e:
            return False
    finally:
        if zip != None:
            zip.close()
    return True
def getTemaMake():
    """Command line for tema's make step, preferring the unified 'tema' launcher."""
    if executableInPath('tema'):
        return ['tema','do_make']
    if executableInPath('tema.make'):
        return ['tema.make']
    raise RunModelPackageFailed(
        "Error: no 'tema' or 'tema.make' in path.")
def getTemaTestengine():
    """Command line for the testengine, preferring the unified 'tema' launcher."""
    if executableInPath('tema'):
        return ['tema','testengine']
    if executableInPath('tema.testengine'):
        return ['tema.testengine']
    raise RunModelPackageFailed(
        "Error: no 'tema' or 'tema.testengine' in path.")
def createModelDirFromZip(modelPackageZip):
    """Extract MODELPACK.zip into a fresh MODELPACK directory and return its name."""
    modelDir = modelPackageZip[:-4]  # drop the '.zip' suffix
    removeDirIfExists(modelDir)
    if unzip(modelDir, modelPackageZip):
        return modelDir
    raise RunModelPackageFailed("Error while unzipping.")
def getTargetDevices(modelDir,options):
    """Return the selected (devicename, logicalname, productname) targets.

    Raises RunModelPackageFailed with an explanatory message when the
    package defines no devices, when the --devices/--products options match
    nothing, or when a selected product has no directory in the package.
    """
    mddFile = getMddFileWithPath(modelDir)
    allTargets = getAllTargetDevices(mddFile)
    if not allTargets:
        raise RunModelPackageFailed(
            "No devices defined in the model package!\n"
            "To fix: in ModelDesigner, New -> Phone\n"
            "Example MyPhone.td:\n"
            "MyPhone(id,number): [('999','040404040')]")
    targets = targetDevicesAllowedByOptions(modelDir,allTargets,options)
    if not targets:
        # Build a helpful diagnostic listing what the user asked for versus
        # what the package actually contains.
        err = "The following options didn't match anything in the package:\n"
        if options.devices:
            err += "--devices:\n" + "\n".join(options.devices.keys()) + "\n"
        if options.products:
            err += "--products:\n" + "\n".join(options.products.split()) + "\n"
        err += "The following devices ARE defined in the package:\n" +\
               "%s" % "\n".join(["%s (product: %s)"%(d,p) for d,p in allTargets])
        raise RunModelPackageFailed(err)
    for d,l,p in targets:
        if not os.path.isdir( os.path.join(modelDir,urllib.quote(p))):
            raise RunModelPackageFailed(
                "There's no product dir '%s' in the model package."%(p,))
    return targets
def createTestConfFile(modelDir,options):
    """Write testconfiguration.conf into *modelDir* and return its path.

    The file lists each target's type and action machines, the selected .td
    data tables, and the localization .csv tables.

    Fix: replaced the Python-2-only ``file()`` builtin with ``open()`` in a
    ``with`` block, so the config file is closed even if the write fails.
    """
    targets = getTargetDevices(modelDir,options)
    confName = os.path.join(modelDir, 'testconfiguration.conf')
    lstsFilesByDevice = {}
    for d,l,p in targets:
        lstsFilesByDevice[d] =\
            ', '.join(getLstsFilesOfDevice(d,p,modelDir,options))
    tdFiles = getTdFiles(modelDir,options)
    csvFiles = getCsvFiles(modelDir,options,True)
    lines = []
    lines.append('[targets: type]')
    lines.extend(['%s: %s'%(t[1],urllib.quote(t[2])) for t in targets])
    lines.append('')
    lines.append('[targets: actionmachines[]]')
    lines.extend(['%s: %s' % (l,lstsFilesByDevice[d]) for d,l,p in targets])
    lines.append('')
    lines.append('[data: names[]]')
    lines.append('datatables: ' + ', '.join(tdFiles))
    lines.append('')
    lines.append('localizationtables: ' + ', '.join(csvFiles))
    lines.append('')
    with open(confName,'w') as confFile:
        confFile.write('\n'.join(lines))
    return confName
def generateTestConf(modelDir,confName,targetDir):
    """Run tema's generatetestconf, wrapping any failure in RunModelPackageFailed."""
    try:
        generatetestconf.generatetestconf(modelDir,confName,targetDir)
    except Exception, e:
        raise RunModelPackageFailed(
            "Error while generating testconf:\n%s" % (e,))
def makeTarget(targetDir):
    """Run tema's make step inside *targetDir*, restoring the original cwd.

    NOTE(review): the bare ``except:`` hides the original traceback (and even
    catches KeyboardInterrupt), re-raising a generic failure — consider
    narrowing it.
    """
    origWorkDir = os.path.abspath(os.getcwd())
    os.chdir(targetDir)
    makeFailed=RunModelPackageFailed("Error while making the target.")
    try:
        try:
            temamake = getTemaMake()
            import subprocess
            retcode = subprocess.call(temamake)
            if retcode != 0:
                raise makeFailed
        except:
            raise makeFailed
    finally:
        # Always restore the working directory, success or not.
        os.chdir(origWorkDir)
def composeTarget(targetDir):
    """Compose the model in *targetDir* via compose.conf; raise RunModelPackageFailed on any error."""
    composeFailed=RunModelPackageFailed("Error while making the target.")
    try:
        print targetDir
        if not composemodel.compose_model(targetDir,"compose.conf"):
            raise composeFailed
    except Exception,e:
        # Note: this also catches the composeFailed raised just above.
        print e
        raise composeFailed
def getTestengineArgs(modelDir,targetDir,options):
    """Assemble the testengine command-line arguments for the composed target.

    Most values pass straight through from the parsed options; the model,
    testdata and action post-processor arguments are derived from the target
    directory contents unless explicitly overridden.
    """
    modelFile = getRulesFileWithPath(targetDir)
#    tdFiles = getTdFilesWithPath(targetDir,options)
    testdata = options.testdata or ','.join(['file:%s'%f for f in getTdFilesWithPath(targetDir,options)])
    actionppArgs = getActionppArgs(modelDir,targetDir,options)
#    csvFiles = getCsvFilesWithPath(targetDir,options)
#    actionppArgs = options.actionpp_args or ",".join(['file:%s'%f for f in csvFiles])
    adapterArgs = options.adapter_args
    # --adapter-args-model appends the rules file so the adapter can load it.
    if options.adapter_args_model:
        if adapterArgs: adapterArgs += ','
        adapterArgs += 'model:%s'%modelFile
    teArgs = ["--model=parallellstsmodel:%s" % modelFile,
              "--testdata=%s" % testdata,
              '--coverage=%s' % options.coverage,
              "--coveragereq=%s" % options.coveragereq,
              "--coveragereq-args=%s" % options.coveragereq_args,
              "--guidance=%s" % options.guidance,
              "--guidance-args=%s" % options.guidance_args,
              "--adapter=%s" % options.adapter,
              "--adapter-args=%s" % adapterArgs,
              '--initmodels=%s' % options.initmodels,
              '--config=%s' % options.config,
              '--actionpp=%s' % options.actionpp,
              '--actionpp-args=%s' % actionppArgs,
              '--stop-after=%s' % options.stop_after,
              '--verify-states=%s' % options.verify_states,
              '--logger=%s' % options.logger,
              '--logger-args=%s' % options.logger_args]
    return teArgs
def removeDir(d):
    """Recursively delete directory *d* (raises if it does not exist)."""
    shutil.rmtree(d)
def removeDirIfExists(d):
    """Recursively delete *d* when it is present; otherwise do nothing."""
    if not os.path.exists(d):
        return
    removeDir(d)
def getTargetDir(modelDir, options):
    """Directory for the runnable model: --targetdir if given, else <modelDir>/runnable_target."""
    if options.targetdir:
        return options.targetdir
    return os.path.join(modelDir, 'runnable_target')
def createNewTarget(modelDir,options):
    """Build a fresh runnable target directory and return its path.

    Wipes any previous target, writes testconfiguration.conf, then (unless
    --notestconf) generates the test configuration and composes the model.
    """
    targetDir = getTargetDir(modelDir,options)
    removeDirIfExists(targetDir)
    confName = createTestConfFile(modelDir,options)
    if not options.notestconf:
        generateTestConf(modelDir,confName,targetDir)
        composeTarget(targetDir)
#    makeTarget(targetDir)
    return targetDir
def runTarget(targetDir,testengineArgs):
    """Run the testengine on the composed target.

    Patches sys.argv so the in-process engine sees the generated arguments;
    falls back to invoking the external testengine command when the module
    is not importable.

    NOTE(review): importing tema.testengine.testengine presumably starts the
    engine as an import side effect — confirm; also argv_org is saved but
    never restored afterwards.
    """
    argv_org = sys.argv[:]
    sys.argv[1:] = testengineArgs
    try:
        import tema.testengine.testengine
    except ImportError:
        testengine = getTemaTestengine()
        import subprocess
        subprocess.call(testengine + testengineArgs)
def prepareTargetDir(modelDir, options):
    """Return a runnable target dir: reuse the existing one with --nomake, else build a new one."""
    if not options.nomake:
        return createNewTarget(modelDir, options)
    targetDir = getTargetDir(modelDir, options)
    if not os.path.exists(targetDir):
        raise RunModelPackageFailed("There is no target dir.")
    return targetDir
def prepareModelDir(modelPackage):
    """Return a model directory for *modelPackage*, extracting it first when it is a zip."""
    if modelPackage.endswith('.zip'):
        return createModelDirFromZip(modelPackage)
    if os.path.isdir(modelPackage):
        return modelPackage
    raise RunModelPackageFailed(
        "Modelpackage should be a .zip file or a directory.\n")
def runModelPackage(modelPackage,options):
    """Top-level pipeline: unpack, configure, compose and run *modelPackage*."""
    print "Preparing model package %s" % (modelPackage,)
    modelDir = prepareModelDir(modelPackage)
    print "The model dir is %s" % (modelDir,)
    # (typo "Tesconfiguration" kept: it is runtime output, not a comment)
    print "Tesconfiguration file is %s/testconfiguration.conf" % (modelDir,)
    print "Preparing the target dir"
    targetDir = prepareTargetDir(modelDir,options)
    if not options.notestconf:
        print "Target ready at %s" % (targetDir,)
        print "The rules file is %s"%(getRulesFileWithPath(targetDir),)
    if not options.norun:
        print "Running the target on testengine"
        testengineArgs = getTestengineArgs(modelDir,targetDir,options)
        print "Args: %s" % (" ".join(testengineArgs),)
        runTarget(targetDir,testengineArgs)
    print "Done"
def main(cmdArgs):
    """Parse arguments and run the package, exiting non-zero on failure."""
    modelPackage,options = parseArgs(cmdArgs)
    try:
        runModelPackage(modelPackage,options)
    except RunModelPackageFailed, failed:
        sys.exit("-"*15+" Runmodelpackage failed! "+"-"*15+"\n" + str(failed))
if __name__=='__main__':
    # Entry point: forward the command line minus the program name.
    main(sys.argv[1:])
| {
"content_hash": "73b1a3f4c0c817451819c6a04f44e425",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 138,
"avg_line_length": 36.258957654723126,
"alnum_prop": 0.6100256030184611,
"repo_name": "tema-tut/tema-tg",
"id": "4cb5764ef9b25d19e2842025b915c2b0348ad7d7",
"size": "23407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ModelUtils/runmodelpackage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "924079"
},
{
"name": "Shell",
"bytes": "2195"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
from airflow.models import DAG, Connection
from airflow.providers.dbt.cloud.hooks.dbt import DbtCloudHook, DbtCloudJobRunException, DbtCloudJobRunStatus
from airflow.providers.dbt.cloud.operators.dbt import (
DbtCloudGetJobRunArtifactOperator,
DbtCloudRunJobOperator,
)
from airflow.utils import db, timezone
DEFAULT_DATE = timezone.datetime(2021, 1, 1)
# Shared fixture identifiers for the dbt Cloud operator tests below.
TASK_ID = "run_job_op"
ACCOUNT_ID_CONN = "account_id_conn"
NO_ACCOUNT_ID_CONN = "no_account_id_conn"
DEFAULT_ACCOUNT_ID = 11111
ACCOUNT_ID = 22222
TOKEN = "token"
PROJECT_ID = 33333
JOB_ID = 4444
RUN_ID = 5555
# Template of the "Monitor Job Run" extra-link URL shown by the operator.
EXPECTED_JOB_RUN_OP_EXTRA_LINK = (
    "https://cloud.getdbt.com/#/accounts/{account_id}/projects/{project_id}/runs/{run_id}/"
)
# Canned trigger_job_run responses for the connection-default and explicit
# account-id cases respectively.
DEFAULT_ACCOUNT_JOB_RUN_RESPONSE = {
    "data": {
        "id": RUN_ID,
        "href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
            account_id=DEFAULT_ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
        ),
    }
}
EXPLICIT_ACCOUNT_JOB_RUN_RESPONSE = {
    "data": {
        "id": RUN_ID,
        "href": EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
            account_id=ACCOUNT_ID, project_id=PROJECT_ID, run_id=RUN_ID
        ),
    }
}
def setup_module():
    """Register the two dbt Cloud connections these tests use in the Airflow DB."""
    # Connection with ``account_id`` specified
    conn_account_id = Connection(
        conn_id=ACCOUNT_ID_CONN,
        conn_type=DbtCloudHook.conn_type,
        login=DEFAULT_ACCOUNT_ID,
        password=TOKEN,
    )
    # Connection with no ``account_id`` specified
    conn_no_account_id = Connection(
        conn_id=NO_ACCOUNT_ID_CONN,
        conn_type=DbtCloudHook.conn_type,
        password=TOKEN,
    )
    db.merge_conn(conn_account_id)
    db.merge_conn(conn_no_account_id)
class TestDbtCloudRunJobOperator:
    """Tests for DbtCloudRunJobOperator: triggering, waiting, trigger reason,
    and the "Monitor Job Run" extra link. All hook calls are mocked."""

    def setup_method(self):
        # Fresh DAG, mocked TaskInstance context, and a common operator config.
        self.dag = DAG("test_dbt_cloud_job_run_op", start_date=DEFAULT_DATE)
        self.mock_ti = MagicMock()
        self.mock_context = {"ti": self.mock_ti}
        self.config = {
            "job_id": JOB_ID,
            "check_interval": 1,
            "timeout": 3,
            "steps_override": ["dbt run --select my_first_dbt_model"],
            "schema_override": "another_schema",
            "additional_run_config": {"threads_override": 8},
        }
    @patch.object(DbtCloudHook, "trigger_job_run", return_value=MagicMock(**DEFAULT_ACCOUNT_JOB_RUN_RESPONSE))
    @pytest.mark.parametrize(
        "job_run_status, expected_output",
        [
            (DbtCloudJobRunStatus.SUCCESS.value, "success"),
            (DbtCloudJobRunStatus.ERROR.value, "exception"),
            (DbtCloudJobRunStatus.CANCELLED.value, "exception"),
            (DbtCloudJobRunStatus.RUNNING.value, "timeout"),
            (DbtCloudJobRunStatus.QUEUED.value, "timeout"),
            (DbtCloudJobRunStatus.STARTING.value, "timeout"),
        ],
    )
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_execute_wait_for_termination(
        self, mock_run_job, conn_id, account_id, job_run_status, expected_output
    ):
        """With wait_for_termination, the operator polls get_job_run until a
        terminal status, failure, or the configured timeout."""
        operator = DbtCloudRunJobOperator(
            task_id=TASK_ID, dbt_cloud_conn_id=conn_id, account_id=account_id, dag=self.dag, **self.config
        )
        assert operator.dbt_cloud_conn_id == conn_id
        assert operator.job_id == self.config["job_id"]
        assert operator.account_id == account_id
        assert operator.check_interval == self.config["check_interval"]
        assert operator.timeout == self.config["timeout"]
        assert operator.wait_for_termination
        assert operator.steps_override == self.config["steps_override"]
        assert operator.schema_override == self.config["schema_override"]
        assert operator.additional_run_config == self.config["additional_run_config"]
        with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
            mock_get_job_run.return_value.json.return_value = {
                "data": {"status": job_run_status, "id": RUN_ID}
            }
            if expected_output == "success":
                operator.execute(context=self.mock_context)
                assert mock_run_job.return_value.data["id"] == RUN_ID
            elif expected_output == "exception":
                # The operator should fail if the job run fails or is cancelled.
                with pytest.raises(DbtCloudJobRunException) as err:
                    operator.execute(context=self.mock_context)
                    assert err.value.endswith("has failed or has been cancelled.")
            else:
                # Demonstrating the operator timing out after surpassing the configured timeout value.
                with pytest.raises(DbtCloudJobRunException) as err:
                    operator.execute(context=self.mock_context)
                    assert err.value.endswith(
                        f"has not reached a terminal status after {self.config['timeout']} seconds."
                    )
            mock_run_job.assert_called_once_with(
                account_id=account_id,
                job_id=JOB_ID,
                cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
                steps_override=self.config["steps_override"],
                schema_override=self.config["schema_override"],
                additional_run_config=self.config["additional_run_config"],
            )
            if job_run_status in DbtCloudJobRunStatus.TERMINAL_STATUSES.value:
                assert mock_get_job_run.call_count == 1
            else:
                # When the job run status is not in a terminal status or "Success", the operator will
                # continue to call ``get_job_run()`` until a ``timeout`` number of seconds has passed
                # (3 seconds for this test). Therefore, there should be 4 calls of this function: one
                # initially and 3 for each check done at a 1 second interval.
                assert mock_get_job_run.call_count == 4
    @patch.object(DbtCloudHook, "trigger_job_run")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_execute_no_wait_for_termination(self, mock_run_job, conn_id, account_id):
        """Without wait_for_termination, the job is triggered but never polled."""
        operator = DbtCloudRunJobOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            account_id=account_id,
            trigger_reason=None,
            dag=self.dag,
            wait_for_termination=False,
            **self.config,
        )
        assert operator.dbt_cloud_conn_id == conn_id
        assert operator.job_id == self.config["job_id"]
        assert operator.account_id == account_id
        assert operator.check_interval == self.config["check_interval"]
        assert operator.timeout == self.config["timeout"]
        assert not operator.wait_for_termination
        assert operator.steps_override == self.config["steps_override"]
        assert operator.schema_override == self.config["schema_override"]
        assert operator.additional_run_config == self.config["additional_run_config"]
        with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
            operator.execute(context=self.mock_context)
            mock_run_job.assert_called_once_with(
                account_id=account_id,
                job_id=JOB_ID,
                cause=f"Triggered via Apache Airflow by task {TASK_ID!r} in the {self.dag.dag_id} DAG.",
                steps_override=self.config["steps_override"],
                schema_override=self.config["schema_override"],
                additional_run_config=self.config["additional_run_config"],
            )
            mock_get_job_run.assert_not_called()
    @patch.object(DbtCloudHook, "trigger_job_run")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_custom_trigger_reason(self, mock_run_job, conn_id, account_id):
        """A user-supplied trigger_reason is passed through as the run cause."""
        custom_trigger_reason = "Some other trigger reason."
        operator = DbtCloudRunJobOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            account_id=account_id,
            trigger_reason=custom_trigger_reason,
            dag=self.dag,
            **self.config,
        )
        assert operator.trigger_reason == custom_trigger_reason
        with patch.object(DbtCloudHook, "get_job_run") as mock_get_job_run:
            mock_get_job_run.return_value.json.return_value = {
                "data": {"status": DbtCloudJobRunStatus.SUCCESS.value, "id": RUN_ID}
            }
            operator.execute(context=self.mock_context)
            mock_run_job.assert_called_once_with(
                account_id=account_id,
                job_id=JOB_ID,
                cause=custom_trigger_reason,
                steps_override=self.config["steps_override"],
                schema_override=self.config["schema_override"],
                additional_run_config=self.config["additional_run_config"],
            )
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_run_job_operator_link(self, conn_id, account_id, create_task_instance_of_operator, request):
        """The "Monitor Job Run" extra link resolves from the pushed XCom URL."""
        ti = create_task_instance_of_operator(
            DbtCloudRunJobOperator,
            dag_id="test_dbt_cloud_run_job_op_link",
            execution_date=DEFAULT_DATE,
            task_id="trigger_dbt_cloud_job",
            dbt_cloud_conn_id=conn_id,
            job_id=JOB_ID,
            account_id=account_id,
        )
        if request.node.callspec.id == "default_account":
            _run_response = DEFAULT_ACCOUNT_JOB_RUN_RESPONSE
        else:
            _run_response = EXPLICIT_ACCOUNT_JOB_RUN_RESPONSE
        ti.xcom_push(key="job_run_url", value=_run_response["data"]["href"])
        url = ti.task.get_extra_links(ti, "Monitor Job Run")
        assert url == (
            EXPECTED_JOB_RUN_OP_EXTRA_LINK.format(
                account_id=account_id if account_id else DEFAULT_ACCOUNT_ID,
                project_id=PROJECT_ID,
                run_id=_run_response["data"]["id"],
            )
        )
class TestDbtCloudGetJobRunArtifactOperator:
    """Tests for DbtCloudGetJobRunArtifactOperator covering JSON and text
    artifacts, with and without an explicit run step."""

    def setup_method(self):
        self.dag = DAG("test_dbt_cloud_get_artifact_op", start_date=DEFAULT_DATE)
    @patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_get_json_artifact(self, mock_get_artifact, conn_id, account_id):
        """A .json artifact path is fetched via the hook with step=None."""
        operator = DbtCloudGetJobRunArtifactOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            run_id=RUN_ID,
            account_id=account_id,
            path="path/to/my/manifest.json",
            dag=self.dag,
        )
        mock_get_artifact.return_value.json.return_value = {"data": "file contents"}
        operator.execute(context={})
        mock_get_artifact.assert_called_once_with(
            run_id=RUN_ID,
            path="path/to/my/manifest.json",
            account_id=account_id,
            step=None,
        )
    @patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_get_json_artifact_with_step(self, mock_get_artifact, conn_id, account_id):
        """An explicit step number is forwarded for JSON artifacts."""
        operator = DbtCloudGetJobRunArtifactOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            run_id=RUN_ID,
            account_id=account_id,
            path="path/to/my/manifest.json",
            step=2,
            dag=self.dag,
        )
        mock_get_artifact.return_value.json.return_value = {"data": "file contents"}
        operator.execute(context={})
        mock_get_artifact.assert_called_once_with(
            run_id=RUN_ID,
            path="path/to/my/manifest.json",
            account_id=account_id,
            step=2,
        )
    @patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_get_text_artifact(self, mock_get_artifact, conn_id, account_id):
        """A non-JSON artifact is read from the response text with step=None."""
        operator = DbtCloudGetJobRunArtifactOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            run_id=RUN_ID,
            account_id=account_id,
            path="path/to/my/model.sql",
            dag=self.dag,
        )
        mock_get_artifact.return_value.text = "file contents"
        operator.execute(context={})
        mock_get_artifact.assert_called_once_with(
            run_id=RUN_ID,
            path="path/to/my/model.sql",
            account_id=account_id,
            step=None,
        )
    @patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_run_artifact")
    @pytest.mark.parametrize(
        "conn_id, account_id",
        [(ACCOUNT_ID_CONN, None), (NO_ACCOUNT_ID_CONN, ACCOUNT_ID)],
        ids=["default_account", "explicit_account"],
    )
    def test_get_text_artifact_with_step(self, mock_get_artifact, conn_id, account_id):
        """An explicit step number is forwarded for text artifacts."""
        operator = DbtCloudGetJobRunArtifactOperator(
            task_id=TASK_ID,
            dbt_cloud_conn_id=conn_id,
            run_id=RUN_ID,
            account_id=account_id,
            path="path/to/my/model.sql",
            step=2,
            dag=self.dag,
        )
        mock_get_artifact.return_value.text = "file contents"
        operator.execute(context={})
        mock_get_artifact.assert_called_once_with(
            run_id=RUN_ID,
            path="path/to/my/model.sql",
            account_id=account_id,
            step=2,
        )
| {
"content_hash": "0119ab14e3a8e6c2d6860e5e5d377da3",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 110,
"avg_line_length": 38.56417112299465,
"alnum_prop": 0.5973791860223254,
"repo_name": "cfei18/incubator-airflow",
"id": "11b155c1d8a873df821a799a90193c5f95e5b21c",
"size": "15208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/dbt/cloud/operators/test_dbt_cloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
import pymysql.cursors
from models.group import Group
from models.contact import Contact
class DbFixture:
    def __init__(self, host, name, user, password):
        """Open an autocommitting pymysql connection to the fixture database."""
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # autocommit=True so changes made by the tests are visible immediately.
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)
def get_group_list(self):
group_list = []
cursor = self.connection.cursor()
try:
cursor.execute("SELECT group_id, group_name, group_header, group_footer FROM group_list")
for row in cursor:
(id, name, header, footer)=row
group_list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return group_list
def get_contact_list(self):
contact_list = []
cursor = self.connection.cursor()
try:
cursor.execute("SELECT id, firstname, lastname, address, home, mobile, work, email, email2, email3 "
"FROM addressbook WHERE deprecated IS NULL")
for row in cursor.fetchall():
(id, first_name, last_name, address, home_phone, mobile_phone, work_phone, email_1, email_2, email_3)=row
contact_list.append(Contact(id=str(id), first_name=first_name, last_name=last_name, address=address,
home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone,
email_1=email_1, email_2=email_2, email_3=email_3))
finally:
cursor.close()
return contact_list
def destroy(self):
self.connection.close() | {
"content_hash": "18abda1487ee44dd3055da7a17b501d3",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 121,
"avg_line_length": 39.19565217391305,
"alnum_prop": 0.5862451469772602,
"repo_name": "rgurevych/python_for_testers",
"id": "2da6b50b55c874a6247293480daf700cfb096e64",
"size": "1803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1407"
},
{
"name": "HTML",
"bytes": "420418"
},
{
"name": "Python",
"bytes": "46959"
},
{
"name": "RobotFramework",
"bytes": "1902"
}
],
"symlink_target": ""
} |
# `sys` is used by sys.exit() here (and again further down this script) but
# was never imported anywhere in the original module, so the failure path
# raised NameError instead of exiting cleanly.
import sys

try:
    import requests
except ImportError:
    # Only a missing module should trigger the install hint; anything else
    # is a real error and should propagate.
    print("GeneratePublishReportPart1 depends on requests library. Install it with `pip install requests`")
    sys.exit(1)
import argparse, cgi, re
from os.path import exists, isdir
from os import makedirs
# URL -> regexp: each vaadin.com metadata endpoint must report a version
# matching the pattern; '{ver}' is substituted with the released version.
metadataChecks = {
    'https://vaadin.com/download/LATEST7': '^7\..*',
    'https://vaadin.com/download/VERSIONS_7': '^7\..*',
    'https://vaadin.com/download/release/7.7/LATEST': '^7\..*',
    'https://vaadin.com/download/LATEST': '^6\..*',
    'https://vaadin.com/download/LATEST8': '^8\.1\..*',
    'https://vaadin.com/download/PRERELEASES': '^{ver}'
}

# Command line: the just-built version plus TeamCity coordinates used to
# link back to the build that generated this report.
parser = argparse.ArgumentParser(description="Post-publish report generator")
parser.add_argument("version", type=str, help="Vaadin version that was just built")
parser.add_argument("teamcityUrl", type=str, help="Address to the teamcity server")
parser.add_argument("buildTypeId", type=str, help="The ID of this build step")
parser.add_argument("buildId", type=str, help="ID of the build to generate this report for")
args = parser.parse_args()
# SVG status dot; '{color}' is filled in per check result.
traffic_light = (
    "<svg width=\"20px\" height=\"20px\" style=\"padding-right:5px\">"
    "<circle cx=\"10\" cy=\"10\" r=\"10\" fill=\"{color}\"/></svg>"
)

def getTrafficLight(b):
    """Return a green SVG dot when *b* is truthy, otherwise a red one."""
    return traffic_light.format(color="green" if b else "red")
# The HTML report is written under ./result; create it if it is missing.
resultPath = "result"
if not exists(resultPath):
    makedirs(resultPath)
elif not isdir(resultPath):
    print("Result path is not a directory.")
    sys.exit(1)

# Split "major.minor.maintenance[.suffix]"; a dotted suffix after the
# maintenance number marks a prerelease build.
(major, minor, maintenance) = args.version.split(".", 2)
prerelease = "." in maintenance
if prerelease:
    maintenance = maintenance.split('.')[0]
def checkUrlContents(url, regexp):
    """Fetch *url* and return True if the body matches *regexp* at its start."""
    r = requests.get(url)
    # PEP 8: comparisons against None use `is not`, never `!=`.
    return re.match(regexp, r.text) is not None
def checkUrlStatus(url):
    """Return True when a GET of *url* answers with HTTP 200."""
    response = requests.get(url)
    return response.status_code == 200
# Run the automated checks: all metadata endpoints, the GitHub release tag
# and the vaadin.com download page for this version.
metadataOk = True
for url in metadataChecks:
    metadataOk = metadataOk and checkUrlContents(url, metadataChecks[url].format(ver=args.version))

# NOTE(review): tagOk is computed here but never shown in the report below;
# confirm whether the tag check should be added to the HTML table.
tagOk = checkUrlStatus("https://github.com/vaadin/framework/releases/tag/{ver}".format(ver=args.version))

if not prerelease:
    downloadPageOk = checkUrlStatus("https://vaadin.com/download/release/{maj}.{min}/{ver}/".format(maj=major, min=minor, ver=args.version))
else:
    downloadPageOk = checkUrlStatus("https://vaadin.com/download/prerelease/{maj}.{min}/{maj}.{min}.{main}/{ver}".format(maj=major, min=minor, main=maintenance, ver=args.version))
# Assemble the HTML report: automated check results first, then the manual
# post-publish steps for the release engineer.
content = """<html>
<head></head>
<body>
<table>
<tr><td>{metadataOk}</td><td>Metadata ok on vaadin.com</td></tr>
<tr><td>{downloadPageOk}</td><td>Download folder on vaadin.com contains the version</td></tr>
""".format(metadataOk=getTrafficLight(metadataOk), downloadPageOk=getTrafficLight(downloadPageOk))

# Maven link differs for prereleases (maven.vaadin.com) vs. releases
# (Maven Central).
mavenUrl = ""
if not prerelease:
    mavenUrl = "http://repo1.maven.org/maven2/com/vaadin/vaadin-server/"
    content += "<tr><td></td><td><a href='{mvnUrl}'>Check {ver} is published to maven.org (might take a while)</td></tr>".format(ver=args.version, mvnUrl=mavenUrl)
else:
    mavenUrl = "http://maven.vaadin.com/vaadin-prereleases/com/vaadin/vaadin-server/"
    content += "<tr><td></td><td><a href='{mvnUrl}'>Check {ver} is published as prerelease to maven.vaadin.com</td></tr>".format(ver=args.version, mvnUrl=mavenUrl)

content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/milestones\">Create milestone for next version in GitHub</a></td></tr>"

#content += """
#<tr><td></td><td><a href="http://test.vaadin.com/{version}/run/LabelModes?restartApplication">Verify uploaded to test.vaadin.com</a></td></tr>
#""".format(version=args.version)

if not prerelease:
    content += '<tr><td></td><td><a href="http://vaadin.com/api">Verify API version list updated</a></td></tr>'

content += "<tr><td></td><td>Run the generated tag_repositories.sh script</td></tr>"

# close GitHub milestone
content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/milestones\">Close GitHub Milestone and create one for next version</a></td></tr>"

# release notes
content += "<tr><td></td><td><a href=\"https://github.com/vaadin/framework/releases/new\">Prepare release notes in GH</a></td></tr>"

content += """
<tr><td></td><td><a href="http://{teamcityUrl}/viewLog.html?buildId={buildId}&buildTypeId={buildTypeId}&tab=dependencies"><h2>Start Post-Publish Release from dependencies tab</a></td></tr>
</table>
</body>
</html>""".format(teamcityUrl=args.teamcityUrl, buildTypeId=args.buildTypeId, buildId=args.buildId, version=args.version)
# Write the report through a context manager so the file is flushed and
# closed even if the write fails; the original leaked the open handle.
with open("result/report.html", 'w') as f:
    f.write(content)
| {
"content_hash": "0ec5b6c5115e2ff3f5b6d782f4fcc690",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 188,
"avg_line_length": 43.86274509803921,
"alnum_prop": 0.7116674117121145,
"repo_name": "Darsstar/framework",
"id": "37b2f8e4090a808ca9063799b8eab33553d3920b",
"size": "4489",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/GeneratePublishReportPart1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "751129"
},
{
"name": "HTML",
"bytes": "104675"
},
{
"name": "Java",
"bytes": "24247164"
},
{
"name": "JavaScript",
"bytes": "131503"
},
{
"name": "Python",
"bytes": "33975"
},
{
"name": "Shell",
"bytes": "14720"
},
{
"name": "Smarty",
"bytes": "175"
}
],
"symlink_target": ""
} |
"""Netmiko Cisco WLC support."""
from typing import Any, Union, Sequence, Iterator, TextIO
import time
import re
import socket
from netmiko.exceptions import NetmikoAuthenticationException
from netmiko.base_connection import BaseConnection
class CiscoWlcSSH(BaseConnection):
    """Netmiko Cisco WLC support."""

    prompt_pattern = r"(?m:[>#]\s*$)"  # force re.Multiline

    def special_login_handler(self, delay_factor: float = 1.0) -> None:
        """WLC presents with the following on login (in certain OS versions)

        login as: user
        (Cisco Controller)
        User: user
        Password:****
        """
        output = ""
        uname = "User:"
        login = "login as"
        # Substring on purpose: matches both "Password" and "password" prompts.
        password = "ssword"
        pattern = rf"(?:{uname}|{login}|{password}|{self.prompt_pattern})"
        while True:
            new_data = self.read_until_pattern(pattern=pattern, read_timeout=25.0)
            output += new_data
            # Device prompt seen -> login complete.
            if re.search(self.prompt_pattern, new_data):
                return
            if uname in new_data or login in new_data:
                assert isinstance(self.username, str)
                self.write_channel(self.username + self.RETURN)
            elif password in new_data:
                assert isinstance(self.password, str)
                self.write_channel(self.password + self.RETURN)
            else:
                msg = f"""
Failed to login to Cisco WLC Device.
Pattern not detected: {pattern}
output:
{output}
"""
                raise NetmikoAuthenticationException(msg)

    def session_preparation(self) -> None:
        """
        Prepare the session after the connection has been established
        Cisco WLC uses "config paging disable" to disable paging
        """
        # _test_channel_read() will happen in the special_login_handler()
        try:
            self.set_base_prompt()
        except ValueError:
            msg = f"Authentication failed: {self.host}"
            raise NetmikoAuthenticationException(msg)
        self.disable_paging(command="config paging disable")

    def send_command_w_enter(self, *args: Any, **kwargs: Any) -> str:
        """
        For 'show run-config' Cisco WLC adds a 'Press Enter to continue...' message
        Even though pagination is disabled.
        show run-config also has excessive delays in the output which requires special
        handling.
        Arguments are the same as send_command_timing() method.
        """
        if len(args) > 1:
            raise ValueError("Must pass in delay_factor as keyword argument")

        # If no delay_factor use 1 for default value
        delay_factor = kwargs.get("delay_factor", 1)
        kwargs["delay_factor"] = self.select_delay_factor(delay_factor)
        output = self._send_command_timing_str(*args, **kwargs)

        # Reuse the caller's kwargs for the follow-up 'enter' keystroke.
        second_args = list(args)
        if len(args) == 1:
            second_args[0] = self.RETURN
        else:
            kwargs["command_string"] = self.RETURN
        if not kwargs.get("max_loops"):
            kwargs["max_loops"] = 150

        if "Press any key" in output or "Press Enter to" in output:
            # Send an 'enter'
            output += self._send_command_timing_str(*second_args, **kwargs)

            # WLC has excessive delay after this appears on screen
            if "802.11b Advanced Configuration" in output:

                # Defaults to 30 seconds
                time.sleep(kwargs["delay_factor"] * 30)
                not_done = True
                i = 1
                # Drain the channel until no new data arrives (max 150 tries).
                while not_done and i <= 150:
                    time.sleep(kwargs["delay_factor"] * 3)
                    i += 1
                    new_data = ""
                    new_data = self.read_channel()
                    if new_data:
                        output += new_data
                    else:
                        not_done = False

        strip_prompt = kwargs.get("strip_prompt", True)
        if strip_prompt:
            # Had to strip trailing prompt twice.
            output = self.strip_prompt(output)
            output = self.strip_prompt(output)
        return output

    def _send_command_w_yes(self, *args: Any, **kwargs: Any) -> str:
        """
        For 'show interface summary' Cisco WLC adds a
        'Would you like to display the next 15 entries?' message.
        Even though pagination is disabled
        Arguments are the same as send_command_timing() method.
        """
        if len(args) > 1:
            raise ValueError("Must pass in delay_factor as keyword argument")

        # If no delay_factor use 1 for default value
        delay_factor = kwargs.get("delay_factor", 1)
        kwargs["delay_factor"] = self.select_delay_factor(delay_factor)
        output = ""
        new_output = self._send_command_timing_str(*args, **kwargs)

        second_args = list(args)
        if len(args) == 1:
            second_args[0] = "y"
        else:
            kwargs["command_string"] = "y"
        strip_prompt = kwargs.get("strip_prompt", True)

        # Keep answering 'y' until the device stops offering more entries.
        while True:
            output += new_output
            if "display the next" in new_output.lower():
                new_output = self._send_command_timing_str(*second_args, **kwargs)
            else:
                break

        # Remove from output 'Would you like to display the next 15 entries? (y/n)'
        pattern = r"^.*display the next.*\n$"
        output = re.sub(pattern, "", output, flags=re.M)
        if strip_prompt:
            # Had to strip trailing prompt twice.
            output = self.strip_prompt(output)
            output = self.strip_prompt(output)
        return output

    def cleanup(self, command: str = "logout") -> None:
        """Reset WLC back to normal paging and gracefully close session."""
        self.send_command_timing("config paging enable")

        # Exit configuration mode
        try:
            # The pattern="" forces use of send_command_timing
            if self.check_config_mode(pattern=""):
                self.exit_config_mode()
        except Exception:
            pass

        # End SSH/telnet session
        self.write_channel(command + self.RETURN)
        count = 0
        output = ""
        while count <= 5:
            time.sleep(0.5)

            # The connection might be dead at this point.
            try:
                output += self.read_channel()
            except socket.error:
                break

            # Don't automatically save the config (user's responsibility)
            if "Would you like to save them now" in output:
                self._session_log_fin = True
                self.write_channel("n" + self.RETURN)

            time.sleep(0.5)
            try:
                self.write_channel(self.RETURN)
            except socket.error:
                break
            count += 1

    def check_config_mode(
        self, check_string: str = "config", pattern: str = "", force_regex: bool = False
    ) -> bool:
        """Checks if the device is in configuration mode or not."""
        if not pattern:
            pattern = re.escape(self.base_prompt)
        return super().check_config_mode(check_string, pattern)

    def config_mode(
        self, config_command: str = "config", pattern: str = "", re_flags: int = 0
    ) -> str:
        """Enter into config_mode."""
        return super().config_mode(
            config_command=config_command, pattern=pattern, re_flags=re_flags
        )

    def exit_config_mode(self, exit_config: str = "exit", pattern: str = "") -> str:
        """Exit config_mode."""
        return super().exit_config_mode(exit_config, pattern)

    def send_config_set(
        self,
        config_commands: Union[str, Sequence[str], Iterator[str], TextIO, None] = None,
        exit_config_mode: bool = False,
        enter_config_mode: bool = False,
        **kwargs: Any,
    ) -> str:
        # WLC config commands are top-level, so neither entering nor exiting
        # config mode is done by default (both flags default to False).
        return super().send_config_set(
            config_commands=config_commands,
            exit_config_mode=exit_config_mode,
            enter_config_mode=enter_config_mode,
            **kwargs,
        )

    def save_config(
        self,
        cmd: str = "save config",
        confirm: bool = True,
        confirm_response: str = "y",
    ) -> str:
        """Saves Config."""
        self.enable()
        if confirm:
            output = self._send_command_timing_str(command_string=cmd)
            if confirm_response:
                output += self._send_command_timing_str(confirm_response)
            else:
                # Send enter by default
                output += self._send_command_timing_str(self.RETURN)
        else:
            # Some devices are slow so match on trailing-prompt if you can
            output = self._send_command_str(command_string=cmd)
        return output
| {
"content_hash": "d659b51c7477ed4611890c515a5e59ab",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 88,
"avg_line_length": 33.815384615384616,
"alnum_prop": 0.5583484986351228,
"repo_name": "ktbyers/netmiko",
"id": "b4ece3b239a59bbf4cd7b8b0ff4e67f7ef2209c2",
"size": "8792",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netmiko/cisco/cisco_wlc_ssh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "384"
},
{
"name": "Python",
"bytes": "726727"
},
{
"name": "Shell",
"bytes": "21540"
}
],
"symlink_target": ""
} |
"""
production server
"""
from app.main import create_app
from app.config import Config
app = create_app(Config) #flask app instance | {
"content_hash": "bbdb43817a4fa3b8239191c7f8f26eb7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 16.75,
"alnum_prop": 0.753731343283582,
"repo_name": "rileymjohnson/fbla",
"id": "0c37a2323ff5b577aff8c558335e8337accbc11c",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "271399"
},
{
"name": "HTML",
"bytes": "117948"
},
{
"name": "JavaScript",
"bytes": "454805"
},
{
"name": "Python",
"bytes": "24252"
}
],
"symlink_target": ""
} |
"""questionnaire external id
Revision ID: cb2e353a154f
Revises: cd009f1475ff
Create Date: 2020-09-17 13:26:19.438307
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'cb2e353a154f'        # this migration
down_revision = 'cd009f1475ff'   # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable ``external_id`` column to both questionnaire tables (rdr DB)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('questionnaire', sa.Column('external_id', sa.String(length=100), nullable=True))
    op.add_column('questionnaire_history', sa.Column('external_id', sa.String(length=100), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the ``external_id`` column from both questionnaire tables (rdr DB)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('questionnaire_history', 'external_id')
    op.drop_column('questionnaire', 'external_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-DB changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-DB changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"content_hash": "22d8cd8ff015fba1c9b93c2c34fb2d5f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 125,
"avg_line_length": 33.62903225806452,
"alnum_prop": 0.7486810551558754,
"repo_name": "all-of-us/raw-data-repository",
"id": "10e9d84034b21c5dc8cac09c61ebcedde510a75f",
"size": "2085",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/alembic/versions/cb2e353a154f_questionnaire_external_id.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
"""The XFS format analyzer helper implementation."""
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class XFSAnalyzerHelper(analyzer_helper.AnalyzerHelper):
  """XFS analyzer helper."""

  FORMAT_CATEGORIES = frozenset([
      definitions.FORMAT_CATEGORY_FILE_SYSTEM])

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_XFS

  def GetFormatSpecification(self):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: the signature-based specification for XFS, or
          None if the format cannot be defined by a specification object.
    """
    spec = specification.FormatSpecification(self.type_indicator)
    # The XFS superblock begins with the magic bytes 'XFSB' at offset 0.
    spec.AddNewSignature(b'XFSB', offset=0)
    return spec
# Register the helper with the global analyzer at import time.
analyzer.Analyzer.RegisterHelper(XFSAnalyzerHelper())
| {
"content_hash": "397e21d4f29f290f2c29710be39ef244",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 28.21212121212121,
"alnum_prop": 0.7593984962406015,
"repo_name": "joachimmetz/dfvfs",
"id": "3c4ed9f8b2542c47b1f8a9b2f7f3f0985279bc05",
"size": "955",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "dfvfs/analyzer/xfs_analyzer_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Factory for the Brea Tonnika NPC template (Tatooine theme park)."""
	result = Creature()
	result.template = "object/mobile/shared_dressed_tatooine_brea_tonnika.iff"
	result.attribute_template_id = 9
	result.stfName("theme_park_name","brea_tonnika")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result | {
"content_hash": "a8b2c15e513073c81280f99f5b613cdc",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.7028753993610224,
"repo_name": "anhstudios/swganh",
"id": "50b18c19838416b99c22dccca13be463b29eee2d",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_tatooine_brea_tonnika.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import m5
from m5.objects import *
from Caches import *
def config_cache(options, system):
if options.external_memory_system and (options.caches or options.l2cache):
print("External caches and internal caches are exclusive options.\n")
sys.exit(1)
if options.external_memory_system:
ExternalCache = ExternalCacheFactory(options.external_memory_system)
if options.cpu_type == "O3_ARM_v7a_3":
try:
from cores.arm.O3_ARM_v7a import *
except:
print("O3_ARM_v7a_3 is unavailable. Did you compile the O3 model?")
sys.exit(1)
dcache_class, icache_class, l2_cache_class, walk_cache_class = \
O3_ARM_v7a_DCache, O3_ARM_v7a_ICache, O3_ARM_v7aL2, \
O3_ARM_v7aWalkCache
else:
dcache_class, icache_class, l2_cache_class, walk_cache_class = \
L1_DCache, L1_ICache, L2Cache, None
if buildEnv['TARGET_ISA'] == 'x86':
walk_cache_class = PageTableWalkerCache
# Set the cache line size of the system
system.cache_line_size = options.cacheline_size
# If elastic trace generation is enabled, make sure the memory system is
# minimal so that compute delays do not include memory access latencies.
# Configure the compulsory L1 caches for the O3CPU, do not configure
# any more caches.
if options.l2cache and options.elastic_trace_en:
fatal("When elastic trace is enabled, do not configure L2 caches.")
if options.l2cache:
# Provide a clock for the L2 and the L1-to-L2 bus here as they
# are not connected using addTwoLevelCacheHierarchy. Use the
# same clock as the CPUs.
system.l2 = l2_cache_class(clk_domain=system.cpu_clk_domain,
size=options.l2_size,
assoc=options.l2_assoc)
system.tol2bus = L2XBar(clk_domain = system.cpu_clk_domain)
system.l2.cpu_side = system.tol2bus.master
system.l2.mem_side = system.membus.slave
if options.memchecker:
system.memchecker = MemChecker()
for i in xrange(options.num_cpus):
if options.caches:
icache = icache_class(size=options.l1i_size,
assoc=options.l1i_assoc)
dcache = dcache_class(size=options.l1d_size,
assoc=options.l1d_assoc)
# If we have a walker cache specified, instantiate two
# instances here
if walk_cache_class:
iwalkcache = walk_cache_class()
dwalkcache = walk_cache_class()
else:
iwalkcache = None
dwalkcache = None
if options.memchecker:
dcache_mon = MemCheckerMonitor(warn_only=True)
dcache_real = dcache
# Do not pass the memchecker into the constructor of
# MemCheckerMonitor, as it would create a copy; we require
# exactly one MemChecker instance.
dcache_mon.memchecker = system.memchecker
# Connect monitor
dcache_mon.mem_side = dcache.cpu_side
# Let CPU connect to monitors
dcache = dcache_mon
# When connecting the caches, the clock is also inherited
# from the CPU in question
system.cpu[i].addPrivateSplitL1Caches(icache, dcache,
iwalkcache, dwalkcache)
if options.memchecker:
# The mem_side ports of the caches haven't been connected yet.
# Make sure connectAllPorts connects the right objects.
system.cpu[i].dcache = dcache_real
system.cpu[i].dcache_mon = dcache_mon
elif options.external_memory_system:
# These port names are presented to whatever 'external' system
# gem5 is connecting to. Its configuration will likely depend
# on these names. For simplicity, we would advise configuring
# it to use this naming scheme; if this isn't possible, change
# the names below.
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
system.cpu[i].addPrivateSplitL1Caches(
ExternalCache("cpu%d.icache" % i),
ExternalCache("cpu%d.dcache" % i),
ExternalCache("cpu%d.itb_walker_cache" % i),
ExternalCache("cpu%d.dtb_walker_cache" % i))
else:
system.cpu[i].addPrivateSplitL1Caches(
ExternalCache("cpu%d.icache" % i),
ExternalCache("cpu%d.dcache" % i))
system.cpu[i].createInterruptController()
if options.l2cache:
system.cpu[i].connectAllPorts(system.tol2bus, system.membus)
elif options.external_memory_system:
system.cpu[i].connectUncachedPorts(system.membus)
else:
system.cpu[i].connectAllPorts(system.membus)
return system
# ExternalSlave provides a "port", but when that port connects to a cache,
# the connecting CPU SimObject wants to refer to its "cpu_side".
# The 'ExternalCache' class provides this adaptation by rewriting the name,
# eliminating distracting changes elsewhere in the config code.
class ExternalCache(ExternalSlave):
    """ExternalSlave whose 'port' is also reachable under the name 'cpu_side'.

    Cache-connection code addresses caches via their 'cpu_side' port; this
    adapter rewrites that attribute name to the ExternalSlave 'port'.
    """
    def __getattr__(cls, attr):
        # Translate the cache-style port name to the ExternalSlave one.
        if (attr == "cpu_side"):
            attr = "port"
        # NOTE(review): super(ExternalSlave, cls) starts the MRO search
        # *after* ExternalSlave -- presumably deliberate SimObject
        # attribute plumbing; confirm before touching.
        return super(ExternalSlave, cls).__getattr__(attr)

    def __setattr__(cls, attr, value):
        if (attr == "cpu_side"):
            attr = "port"
        return super(ExternalSlave, cls).__setattr__(attr, value)
def ExternalCacheFactory(port_type):
    """Return a constructor for named ExternalCache ports of ``port_type``."""
    def make(name):
        # port_data carries the externally visible port name.
        return ExternalCache(
            port_data=name, port_type=port_type, addr_ranges=[AllMemory])
    return make
| {
"content_hash": "6528433f3ee040d04ef5b1f2c8c156d3",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 41.2,
"alnum_prop": 0.5937395379979913,
"repo_name": "TUD-OS/gem5-dtu",
"id": "3fa3676b08dbf1f6a9f10a9395f4b6b833f97f3a",
"size": "8214",
"binary": false,
"copies": "3",
"ref": "refs/heads/dtu-mmu",
"path": "configs/common/CacheConfig.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
class Symbol(object):
    """Grammar symbol identified by a numeric id and a name.

    NOTE(review): the constructor currently discards both arguments -- this
    looks like a stub; confirm whether ``num``/``name`` should be stored.
    """
    def __init__(self, num, name):
        # Intentionally a no-op for now; see class note.
        pass
| {
"content_hash": "74fb63d01e3b7aaec5c938d43fd84d07",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 17.75,
"alnum_prop": 0.5492957746478874,
"repo_name": "lodevil/pyparser",
"id": "b52c870a370c9d7291720f19f7026d4cc271c706",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyparser/symbol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24601"
}
],
"symlink_target": ""
} |
from twisted.internet.defer import inlineCallbacks
from battlesnake.conf import settings
from battlesnake.core.inbound_command_handling.base import CommandError
from battlesnake.outbound_commands.think_fn_wrappers import btloadmech, \
btgetxcodevalue_ref, set_attrs, btdesignex
@inlineCallbacks
def load_ref_in_templater(protocol, unit_ref):
    """
    Loads a ref in the templater for scanning/manipulation.

    :param BattlesnakeTelnetProtocol protocol:
    :param str unit_ref: The unit reference to load.
    :raises CommandError: If ``unit_ref`` is not a valid unit reference.
    """

    p = protocol
    # Validate the ref before loading anything into the templater.
    is_valid_ref = yield btdesignex(p, unit_ref)
    if not is_valid_ref:
        raise CommandError("Invalid unit reference.")
    templater_dbref = settings['unit_library']['templater_dbref']
    yield btloadmech(p, templater_dbref, unit_ref)
    # btloadmech() doesn't set the Mechname attrib, so we have to pull that
    # from the template file and set it on the object ourselves.
    mechname = yield btgetxcodevalue_ref(p, unit_ref, 'Mechname')
    yield set_attrs(p, templater_dbref, {'Mechname': mechname})
| {
"content_hash": "af96a0a629b434b167fec570706929b8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 37.892857142857146,
"alnum_prop": 0.7360980207351555,
"repo_name": "gtaylor/btmux_battlesnake",
"id": "e61daa5c6586721a745d34d08af9d86a238bc233",
"size": "1061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "battlesnake/plugins/contrib/unit_library/outbound_commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "396903"
}
],
"symlink_target": ""
} |
"""Performance test to make sure rule keys are unaffected by absolute paths.
The general algorithm is:
- Build all targets
- Rename directory being tested
- Build all targets, check to ensure everything pulls from dir cache
- Buck build all targets to verify no-op build works.
"""
import argparse
import re
import subprocess
import os
import tempfile
import shutil
import sys
from collections import defaultdict
from datetime import datetime
def createArgParser():
    """Build the command-line parser for the buck performance test driver."""
    parser = argparse.ArgumentParser(
        description='Run the buck performance test')
    # (flag, action, type, help) in the original declaration order.
    arg_specs = [
        ('--perftest_id', 'store', str,
         'The identifier of this performance test'),
        ('--revisions_to_go_back', 'store', int,
         'The maximum number of revisions to go back when testing'),
        ('--iterations_per_diff', 'store', int,
         'The number of iterations to run on diff'),
        ('--targets_to_build', 'append', str,
         'The targets to build'),
        ('--repo_under_test', 'store', str,
         'Path to the repo under test'),
        ('--project_under_test', 'store', str,
         'Path to the project folder being tested under repo'),
        ('--path_to_buck', 'store', str,
         'The path to the buck binary'),
        ('--old_buck_revision', 'store', str,
         'The original buck revision'),
        ('--new_buck_revision', 'store', str,
         'The new buck revision'),
    ]
    for flag, action, arg_type, help_text in arg_specs:
        parser.add_argument(flag, action=action, type=arg_type, help=help_text)
    return parser
def log(message):
    """Print a timestamped progress message and flush stdout immediately."""
    # NB: Python 2 print statement -- this script targets Python 2.
    print '%s\t%s' % (str(datetime.now()), message)
    sys.stdout.flush()
def timedelta_total_seconds(timedelta):
    """Return the timedelta's total duration as (fractional) seconds.

    The original hand-rolled the days/seconds/microseconds arithmetic;
    datetime.timedelta.total_seconds() computes exactly the same value and
    has been in the standard library since Python 2.7.
    """
    return timedelta.total_seconds()
class BuildResult():
    """Outcome of one buck build: wall-clock delta, per-cache-result rule
    lists, and the rule-name -> (rule key, debug key) map."""
    def __init__(self, time_delta, cache_results, rule_key_map):
        # Plain value object; attributes mirror the constructor arguments.
        (self.time_delta,
         self.cache_results,
         self.rule_key_map) = (time_delta, cache_results, rule_key_map)
def clean(cwd):
    """Delete every untracked file from the Mercurial repo at *cwd*."""
    log('Running hg purge.')
    subprocess.check_call(['hg', 'purge', '--all'], cwd=cwd)
def reset(revision, cwd):
    """Revert the working copy at *cwd* to *revision* (hg revert -a -r)."""
    subprocess.check_call(['hg', 'revert', '-a', '-r', revision], cwd=cwd)
def ant_clean_build(buck_repo):
    """Rebuild buck from scratch via `ant clean default` in *buck_repo*."""
    log('Running ant clean default.')
    subprocess.check_call(['ant', 'clean', 'default'], cwd=buck_repo)
def buck_clean(args, cwd):
    """Run `buck clean` in *cwd* with the buck binary from *args*."""
    log('Running buck clean.')
    subprocess.check_call([args.path_to_buck, 'clean'], cwd=cwd)
# Per-rule result lines in buck-out/bin/build.log, e.g.
# "BuildRuleFinished(//a:b): SUCCESS DIR_HIT BUILT_LOCALLY abc123".
BUILD_RESULT_LOG_LINE = re.compile(
    r'BuildRuleFinished\((?P<rule_name>[\w_\-:#\/,]+)\): (?P<result>[A-Z_]+) '
    r'(?P<cache_result>[A-Z_]+) (?P<success_type>[A-Z_]+) '
    r'(?P<rule_key>[0-9a-f]*)')

# "INFO: RuleKey <hash>=<debug>" lines from `buck build -v 5` output.
RULEKEY_LINE = re.compile(
    r'^INFO: RuleKey (?P<rule_key>[0-9a-f]*)='
    r'(?P<rule_key_debug>.*)$')

# RuleKey debug lines in buck-out/log/buck-0.log (java.util.logging format).
BUCK_LOG_RULEKEY_LINE = re.compile(
    r'.*\[[\w ]+\](?:\[command:[0-9a-f-]+\])?\[tid:\d+\]'
    r'\[com.facebook.buck.rules.keys.RuleKey[\$\.]?Builder\] '
    r'RuleKey (?P<rule_key>[0-9a-f]+)='
    r'(?P<rule_key_debug>.*)$')
def buck_build_target(args, cwd, targets, log_as_perftest=True):
    """Builds a target with buck and returns performance information.

    Args:
        args: parsed CLI namespace (uses path_to_buck and perftest_id).
        cwd: repository directory to run the build in.
        targets: list of buck target strings to build.
        log_as_perftest: when True, tag the buck JVM so the build is
            recorded as a perf-test run.
    Returns:
        BuildResult with the wall-clock delta, cache results, and the
        rule-name -> (rule key, debug key) map.
    """
    log('Running buck build %s.' % ' '.join(targets))
    bucklogging_properties_path = os.path.join(
        cwd, '.bucklogging.local.properties')
    with open(bucklogging_properties_path, 'w') as bucklogging_properties:
        # The default configuration has the root logger and FileHandler
        # discard anything below FINE level.
        #
        # We need RuleKey logging, which uses FINER (verbose), so the
        # root logger and file handler both need to be reconfigured
        # to enable verbose logging.
        bucklogging_properties.write(
            '''.level=FINER
            java.util.logging.FileHandler.level=FINER''')
    env = os.environ.copy()
    # Force buck to pretend it's repo is clean.
    env.update({
        'BUCK_REPOSITORY_DIRTY': '0'
    })
    if log_as_perftest:
        with open('.buckjavaargs.local', 'a') as f:
            f.write('-Dbuck.perftest_id=%s\n' % (args.perftest_id,))
            f.write('-Dbuck.perftest_side=new\n')
    start = datetime.now()
    tmpFile = tempfile.TemporaryFile()
    try:
        subprocess.check_call(
            [
                args.path_to_buck,
                'build',
                '--deep',
                # t16296463
                '--config',
                'project.glob_handler=',
                '--config',
                'cache._exp_propagation=false',
            ] + targets + ['-v', '5'],
            stdout=tmpFile,
            stderr=tmpFile,
            cwd=cwd,
            env=env)
    # Bare except is tolerable here: the failure is logged and re-raised.
    except:
        tmpFile.seek(0)
        log('Buck build failed: %s' % tmpFile.read())
        raise
    tmpFile.seek(0)
    finish = datetime.now()
    (cache_results, rule_key_map) = build_maps(cwd, tmpFile)
    result = BuildResult(finish - start, cache_results, rule_key_map)
    cache_counts = {}
    for key, value in result.cache_results.iteritems():
        cache_counts[key] = len(value)
    log('Test Build Finished! Elapsed Seconds: %d, Cache Counts: %s' % (
        timedelta_total_seconds(result.time_delta), repr(cache_counts)))
    return result
def build_maps(cwd, tmpFile):
    """Parse rule-key information out of the buck build output.

    Reads RuleKey debug expansions from buck-out/log/buck-0.log when it
    exists (java.util.logging format), otherwise from the captured stdout
    in ``tmpFile``, then joins them against build.log result lines.

    Args:
        cwd: repository checkout that was built.
        tmpFile: open file object holding the captured `buck build -v 5`
            output (positioned at the start; not closed here).

    Returns:
        (cache_results, rule_key_map) where cache_results maps a cache
        result type (e.g. DIR_HIT) to a list of rule dicts, and
        rule_key_map maps rule name -> (rule_key, rule_key_debug).

    Raises:
        Exception: if build.log mentions a rule key absent from the
            verbose output (would indicate inconsistent logs).
    """
    java_utils_log_path = os.path.join(
        cwd,
        'buck-out', 'log', 'buck-0.log')
    # Track whether we opened the handle ourselves so we can close it;
    # the original leaked it. tmpFile belongs to the caller and is left open.
    opened_here = os.path.exists(java_utils_log_path)
    if opened_here:
        pattern = BUCK_LOG_RULEKEY_LINE
        build_output_file = open(java_utils_log_path)
    else:
        pattern = RULEKEY_LINE
        build_output_file = tmpFile
    rule_debug_map = {}
    try:
        for line in build_output_file:
            match = pattern.match(line)
            if match:
                rule_debug_map[match.group('rule_key')] = match.group(
                    'rule_key_debug')
    finally:
        if opened_here:
            build_output_file.close()
    logfile_path = os.path.join(
        cwd,
        'buck-out', 'bin', 'build.log')
    cache_results = defaultdict(list)
    rule_key_map = {}
    with open(logfile_path, 'r') as logfile:
        for line in logfile.readlines():
            line = line.strip()
            match = BUILD_RESULT_LOG_LINE.search(line)
            if match:
                rule_name = match.group('rule_name')
                rule_key = match.group('rule_key')
                if rule_key not in rule_debug_map:
                    raise Exception("""ERROR: build.log contains an entry
                        which was not found in buck build -v 5 output.
                        Rule: {0}, rule key: {1}""".format(rule_name, rule_key))
                cache_results[match.group('cache_result')].append({
                    'rule_name': rule_name,
                    'rule_key': rule_key,
                    'rule_key_debug': rule_debug_map[rule_key]
                })
                rule_key_map[match.group('rule_name')] = (rule_key, rule_debug_map[rule_key])
    return (cache_results, rule_key_map)
def set_cache_settings(
        args,
        cwd,
        cache_mode,
        dir_cache_only=True):
    """Write .buckconfig.local and .buckversion for the test checkout.

    Configures the dir cache with the given mode and pins the buck
    version to args.new_buck_revision.
    """
    log('Reconfiguring cache settings:')
    cache_section = 'mode = dir' if dir_cache_only else ''
    config_text = """[cache]
%s
dir = buck-cache
dir_mode = %s
[build]
# Some repositories set this to a lower value, which breaks an assumption
# in this test: that all rules with correct rule keys will get hits.
artifact_cache_size_limit = 2000000000
""" % (cache_section, cache_mode)
    log(config_text)
    with open(os.path.join(cwd, '.buckconfig.local'), 'w') as config_file:
        config_file.write(config_text)
        config_file.truncate()
    with open(os.path.join(cwd, '.buckversion'), 'w') as version_file:
        version_file.write(args.new_buck_revision + os.linesep)
        version_file.truncate()
def build_all_targets(
        args,
        cwd,
        cache_mode,
        run_clean=True,
        dir_cache_only=True,
        log_as_perftest=True):
    """Configure the cache, optionally clean, and build every CLI target.

    Targets come from args.targets_to_build, where each entry may be a
    comma-separated list. Returns the BuildResult from buck_build_target.
    """
    set_cache_settings(
        args,
        cwd,
        cache_mode,
        dir_cache_only=dir_cache_only)
    # Flatten comma-separated target groups into a single list.
    targets = [
        target
        for target_str in args.targets_to_build
        for target in target_str.split(',')
    ]
    if run_clean:
        buck_clean(args, cwd)
    return buck_build_target(
        args,
        cwd,
        targets,
        log_as_perftest=log_as_perftest)
def check_cache_results(result, expected_keys, message, exception_message, last_result):
    """Fail the test if any cache result type outside ``expected_keys`` occurred.

    Logs ``message`` plus the old/new rule key for every offending rule
    (comparing against ``last_result``), then raises with
    ``exception_message``.
    """
    unexpected = [k for k in result.cache_results.keys() if k not in expected_keys]
    if not unexpected:
        return
    log(message)
    for result_type in unexpected:
        for rule in result.cache_results[result_type]:
            name = rule['rule_name']
            new_key, new_key_debug = result.rule_key_map[name]
            prev_key, prev_key_debug = last_result.rule_key_map[name]
            log('Rule %s, result %s.' % (name, result_type))
            log('\tOld Rule Key (%s): %s.' % (prev_key, prev_key_debug))
            log('\tNew Rule Key (%s): %s.' % (new_key, new_key_debug))
    raise Exception(exception_message)
def get_buck_repo_root(path):
    """Walk up from ``path`` to the directory containing .buckconfig.

    Returns the repo root, or None if no .buckconfig exists in any
    ancestor. The original looped forever when nothing was found,
    because os.path.dirname('/') == '/' never reaches None.
    """
    while (path is not None and
           not os.path.exists(os.path.join(path, '.buckconfig'))):
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding .buckconfig.
            return None
        path = parent
    return path
def move_mount(from_mount, to_mount):
    """Move a mount point and fix up absolute symlinks under it.

    Runs `mount --move` (after a sync), then rewrites any symlink beneath
    ``to_mount`` whose resolved target still points into the old location.
    Requires sufficient privileges to call mount.
    """
    subprocess.check_call("sync")
    subprocess.check_call(["mount", "--move", from_mount, to_mount])
    old_prefix = from_mount + '/'
    new_prefix = to_mount + '/'
    for dirpath, _dirnames, filenames in os.walk(to_mount):
        for entry in filenames:
            link_path = os.path.join(dirpath, entry)
            if not os.path.islink(link_path):
                continue
            target = os.path.realpath(link_path)
            if target.startswith(old_prefix):
                # Recreate the link pointing at the new mount location.
                os.unlink(link_path)
                os.symlink(target.replace(old_prefix, new_prefix), link_path)
def main():
    """Entry point: warm the buck cache, relocate the repo, and verify that
    rule keys still hit from the new location and on a noop build."""
    args = createArgParser().parse_args()
    log('Running Performance Test!')
    ant_clean_build(get_buck_repo_root(args.path_to_buck))
    clean(args.repo_under_test)
    log('=== Warming up cache ===')
    cwd = os.path.join(args.repo_under_test, args.project_under_test)
    # Baseline build populates the cache; its rule keys are the reference
    # for the comparisons below.
    last_result = build_all_targets(
        args,
        cwd,
        'readwrite',
        dir_cache_only=False,
        log_as_perftest=False)
    log('=== Cache Warm! Running tests ===')
    new_directory_name = (os.path.basename(args.repo_under_test) +
                          '_test_iteration_')
    # Rename the directory to flesh out any cache problems.
    cwd_root = os.path.join(os.path.dirname(args.repo_under_test),
                            new_directory_name)
    cwd = os.path.join(cwd_root, args.project_under_test)
    log('Renaming %s to %s' % (args.repo_under_test, cwd_root))
    # Determine whether the repo is itself a mount point: if so it must be
    # relocated with `mount --move` rather than a plain rename.
    if not os.path.isfile('/proc/mounts'):
        is_mounted = False
    else:
        with open('/proc/mounts', 'r') as mounts:
            # grab the second element (mount point) from /proc/mounts
            lines = [l.strip().split() for l in mounts.read().splitlines()]
            lines = [l[1] for l in lines if len(l) >= 2]
            is_mounted = args.repo_under_test in lines
    if is_mounted:
        if not os.path.exists(cwd_root):
            os.makedirs(cwd_root)
        move_mount(args.repo_under_test, cwd_root)
    else:
        # If cwd_root exists, it means that a previous attempt to run
        # this script wasn't able to clean up that folder properly.
        # In this case, we clean up that folder.
        shutil.rmtree(cwd_root, ignore_errors=True)
        os.rename(args.repo_under_test, cwd_root)
    # The finally block guarantees the repo is moved back to its original
    # location even when a cache check raises.
    try:
        log('== Checking for problems with absolute paths ==')
        result = build_all_targets(args, cwd, 'readonly')
        check_cache_results(result,
                            ['DIR_HIT', 'IGNORED', 'LOCAL_KEY_UNCHANGED_HIT'],
                            'Building was unable to reuse the cache from a '
                            'previous run. This suggests one of the rule keys '
                            'contains an absolute path.',
                            'Failed to reuse cache across directories!!!',
                            last_result)
        log('== Ensure noop build does nothing. ==')
        result = build_all_targets(
            args,
            cwd,
            'readonly',
            run_clean=False)
        check_cache_results(result, ['LOCAL_KEY_UNCHANGED_HIT'],
                            'Doing a noop build not hit all of its keys.',
                            'Doing a noop build not hit all of its keys.',
                            last_result)
    finally:
        log('Renaming %s to %s' % (cwd_root, args.repo_under_test))
        if is_mounted:
            move_mount(cwd_root, args.repo_under_test)
            shutil.rmtree(cwd_root)
        else:
            os.rename(cwd_root, args.repo_under_test)
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "853baf3ac5b859c4c77449acd3bdb8d0",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 93,
"avg_line_length": 33.18316831683168,
"alnum_prop": 0.5669103386543339,
"repo_name": "shs96c/buck",
"id": "bf482ea613a9597ba5565e23085ddfde9a38cc49",
"size": "13428",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/perf_test_hg.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1585"
},
{
"name": "Batchfile",
"bytes": "2563"
},
{
"name": "C",
"bytes": "274563"
},
{
"name": "C#",
"bytes": "237"
},
{
"name": "C++",
"bytes": "18013"
},
{
"name": "CSS",
"bytes": "54894"
},
{
"name": "D",
"bytes": "1017"
},
{
"name": "Dockerfile",
"bytes": "1938"
},
{
"name": "Go",
"bytes": "9630"
},
{
"name": "Groovy",
"bytes": "3362"
},
{
"name": "HTML",
"bytes": "7188"
},
{
"name": "Haskell",
"bytes": "1008"
},
{
"name": "IDL",
"bytes": "480"
},
{
"name": "Java",
"bytes": "28323049"
},
{
"name": "JavaScript",
"bytes": "934510"
},
{
"name": "Kotlin",
"bytes": "21626"
},
{
"name": "Lex",
"bytes": "3241"
},
{
"name": "Makefile",
"bytes": "1916"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "4935"
},
{
"name": "Objective-C",
"bytes": "172139"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "486"
},
{
"name": "Prolog",
"bytes": "1486"
},
{
"name": "Python",
"bytes": "2027822"
},
{
"name": "Roff",
"bytes": "1207"
},
{
"name": "Rust",
"bytes": "5199"
},
{
"name": "Scala",
"bytes": "5082"
},
{
"name": "Shell",
"bytes": "67040"
},
{
"name": "Smalltalk",
"bytes": "3922"
},
{
"name": "Swift",
"bytes": "11377"
},
{
"name": "Thrift",
"bytes": "80526"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
import pytest
import six
from jak import helpers
# Fixture: jakfile contents with //-style comments in several positions
# (standalone, inline, and a commented-out entry) to exercise the JSON
# comment-stripping helper.
jakfile_content_1 = """
// Comment 1
{
// Comment 2
"password_file": "jakpassword",
// Comment 3
"files_to_encrypt": [ "env", "env2" ] // Inline-Comment 4
// "commented out line": 5
} // Comment 5 (seriously?)
// Comment 6
// Comment 7
"""
def test_remove_comments_from_JSON():
    """Stripping comments from the fixture leaves only the compact JSON."""
    expected = '{"password_file":"jakpassword","files_to_encrypt":["env","env2"]}'
    assert helpers._remove_comments_from_JSON(jakfile_content_1) == expected
def test_read_jakfile_to_dict(tmpdir):
    """A commented jakfile on disk parses into a dict with the expected keys."""
    jakfile = tmpdir.join("jakfile")
    jakfile.write(jakfile_content_1)
    assert jakfile.read() == jakfile_content_1
    parsed = helpers.read_jakfile_to_dict(jwd=jakfile.dirpath().strpath)
    assert isinstance(parsed, dict)
    for expected_key in ('files_to_encrypt', 'password_file'):
        assert expected_key in parsed
def test_grouper():
    """grouper() chunks strings into fixed-size tuples; non-iterables raise."""
    cases = [
        ('aaa', 1, ('a', 'a', 'a')),
        ('aaa', 5, ('aaa',)),
        ('aaabbbcc', 3, ('aaa', 'bbb', 'cc')),
    ]
    for text, size, expected in cases:
        assert helpers.grouper(text, size) == expected
    # Raise error due to 2 not being iterable
    with pytest.raises(TypeError):
        helpers.grouper(2, 1)
def test_generate_256bit_key():
    """Generated keys are 64 bytes of hex (256 bits) as a binary string."""
    generated = helpers.generate_256bit_key()
    assert isinstance(generated, six.binary_type)
    assert len(generated) == 64
def test_get_jak_working_directory(tmpdir):
    """jwd resolution: no repo -> cwd; .git in cwd -> cwd; .git above -> root.

    Layout exercised:
        /repo/.git/gitfile
        /repo/sub1/sub2
    """
    # No .git anywhere above: the cwd itself is the jwd.
    norepo = tmpdir.mkdir('norepo')
    assert helpers.get_jak_working_directory(cwd=norepo.strpath) == norepo.strpath

    # .git directly inside the cwd.
    repo = tmpdir.mkdir('repo')
    repo.mkdir('.git').join('gitfile').write('this is a git repo')
    assert helpers.get_jak_working_directory(cwd=repo.strpath) == repo.strpath

    # .git in an ancestor of a nested cwd resolves to the repo root.
    nested = repo.mkdir('sub1').mkdir('sub2')
    found = helpers.get_jak_working_directory(cwd=nested.strpath)
    assert '/repo' in found
    assert found.count('/') > 3
def test_does_jwd_have_gitignore(tmpdir):
    """.gitignore detection follows jak-working-directory resolution rules."""
    repo = tmpdir.mkdir("repo_folder")
    repo.join(".gitignore").write("i exist")
    # Passes: the .gitignore lives directly in the cwd.
    assert helpers.does_jwd_have_gitignore(cwd=repo.strpath)
    subdir = repo.mkdir('sub')
    # Fails: no .git in any parent, and the cwd has no .gitignore of its own.
    assert not helpers.does_jwd_have_gitignore(cwd=subdir.strpath)
    repo.mkdir('.git')
    # Passes again: the parent now has both .git and .gitignore.
    assert helpers.does_jwd_have_gitignore(cwd=subdir.strpath)
def test_create_backup_filepath():
    """Backups live under <jwd>/.jak with path separators flattened to '_'."""
    expectations = [
        (('/a/b/c', '/a/b/c/d/e.txt'), '/a/b/c/.jak/d_e.txt_backup'),
        # Special case, root.
        (('/', '/a'), '/.jak/a_backup'),
        (('/a/b', '/a/b/c'), '/a/b/.jak/c_backup'),
        (('/a/b', '/a/b/c/d/e'), '/a/b/.jak/c_d_e_backup'),
    ]
    for (jwd, filepath), expected in expectations:
        assert helpers.create_backup_filepath(jwd=jwd, filepath=filepath) == expected
| {
"content_hash": "0d55c493bb39ea0081864d9047b68a2a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 88,
"avg_line_length": 30.299065420560748,
"alnum_prop": 0.6582356570018507,
"repo_name": "dispel/jak",
"id": "07fc2f1dbee40ac3ceb924eb6fddb9fcda2c0d6e",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67541"
},
{
"name": "Shell",
"bytes": "470"
}
],
"symlink_target": ""
} |
import re
from typing import Dict, Sequence, Tuple
from unittest import TestCase, mock
from google.api_core.retry import Retry
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.os_login import OSLoginHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
# Shared fixture values for the OSLoginHook test cases below.
TEST_GCP_CONN_ID: str = "test-gcp-conn-id"
TEST_DELEGATE_TO: str = "test-delegate-to"
TEST_PROJECT_ID: str = "test-project-id"
TEST_PROJECT_ID_2: str = "test-project-id-2"
TEST_USER: str = "test-user"
TEST_CREDENTIALS = mock.MagicMock()
TEST_BODY: Dict = mock.MagicMock()
TEST_RETRY: Retry = mock.MagicMock()
TEST_TIMEOUT: float = 4
TEST_METADATA: Sequence[Tuple[str, str]] = ()
# Resource name the hook is expected to derive from TEST_USER.
TEST_PARENT: str = "users/test-user"
class TestOSLoginHook(TestCase):
    """import_ssh_public_key forwards an explicitly passed project_id."""

    def setUp(
        self,
    ) -> None:
        # Replace the hook's __init__ so no real GCP connection is needed.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Explicit project_id should be passed through verbatim to the API.
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=TEST_PROJECT_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=dict(
                parent=TEST_PARENT,
                ssh_public_key=TEST_BODY,
                project_id=TEST_PROJECT_ID,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestOSLoginHookWithDefaultProjectIdHook(TestCase):
    """With project_id=None, the connection's default project id is used."""

    def setUp(
        self,
    ) -> None:
        # Replace the hook's __init__ so no real GCP connection is needed.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # project_id=None: the credential-derived TEST_PROJECT_ID_2 wins.
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=None,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=dict(
                parent=TEST_PARENT,
                ssh_public_key=TEST_BODY,
                project_id=TEST_PROJECT_ID_2,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestOSLoginHookWithoutDefaultProjectIdHook(TestCase):
    """Explicit project_id wins even when the connection has no default."""

    def setUp(
        self,
    ) -> None:
        # Connection configured without a default project id.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_2),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Explicit TEST_PROJECT_ID overrides the credential-derived project.
        self.hook.import_ssh_public_key(
            user=TEST_USER,
            ssh_public_key=TEST_BODY,
            project_id=TEST_PROJECT_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
            request=dict(parent=TEST_PARENT, ssh_public_key=TEST_BODY, project_id=TEST_PROJECT_ID),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
# Error text raised when no project id can be determined at all;
# regex-escaped for use with assertRaisesRegex below.
TEST_MESSAGE = re.escape(
    "The project id must be passed either as keyword project_id parameter or as project_id extra in "
    "Google Cloud connection definition. Both are not set!"
)
class TestOSLoginHookMissingProjectIdHook(TestCase):
    """No explicit project_id and no connection default -> AirflowException."""

    def setUp(
        self,
    ) -> None:
        # Connection configured without a default project id.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = OSLoginHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
    def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # With no project id from either source, the hook must refuse to call.
        with self.assertRaisesRegex(AirflowException, TEST_MESSAGE):
            self.hook.import_ssh_public_key(
                user=TEST_USER,
                ssh_public_key=TEST_BODY,
                project_id=None,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
| {
"content_hash": "0c404a9ca933e2f8e1604730b3184f2a",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 107,
"avg_line_length": 36.358024691358025,
"alnum_prop": 0.6244482173174872,
"repo_name": "airbnb/airflow",
"id": "7e37569539b6378b4173e616e00b6461dfe571b5",
"size": "6675",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/hooks/test_os_login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
def join(*args):
    """
    Join url path segments together with single slashes.

    Whitespace-only segments are dropped. A single surviving segment is
    returned unchanged; otherwise segments are stripped of surrounding
    slashes and joined, preserving a trailing slash on the last segment.
    Returns '' when nothing remains.
    """
    # Materialize as a list: on Python 3, filter() returns an iterator,
    # which the original len()/indexing code crashed on.
    parts = [arg for arg in args if arg.strip()]
    if not parts:
        return ""
    if len(parts) == 1:
        return parts[0]
    joined = '/'.join(path.strip('/') for path in parts)
    return joined + '/' if parts[-1].endswith('/') else joined
# vim: filetype=python
| {
"content_hash": "56d7486534baaff118f3566e0007ee12",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 21.55,
"alnum_prop": 0.5614849187935035,
"repo_name": "ryankanno/py-utilities",
"id": "13c49aa2cfa046a1a42d97e39c834bb815d50715",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_utilities/http/url_utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64595"
},
{
"name": "Shell",
"bytes": "6471"
}
],
"symlink_target": ""
} |
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from django.db.utils import OperationalError
from mock import patch
import kolibri
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
# (LEVEL, msg) tuples captured by log_logger() once activate_log_logger()
# has patched logging; tests assert against the most recent entries.
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
    """
    Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
    test something specific was logged.

    Appends (LEVEL, msg) to the module-level LOG_LOGGER list, then delegates
    to the original _log implementation (stashed on the Logger class as
    ``__log`` by activate_log_logger).
    """
    LOG_LOGGER.append((LEVEL, msg))
    # Call the original function
    logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
    """
    Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
    of py.test (test accepts a ``monkeypatch`` argument).

    The original Logger._log is saved as Logger.__log first so log_logger can
    still delegate to it; monkeypatch restores both attributes on teardown.
    """
    monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
    monkeypatch.setattr(logging.Logger, "_log", log_logger)
@pytest.fixture
def plugins():
    """Yield kolibri.plugins with its config redirected to a throwaway file.

    Restores the original conf_file on teardown so plugin-config state
    cannot leak between tests. (The temp file itself is not removed here.)
    """
    from kolibri import plugins

    _, config_file = tempfile.mkstemp(suffix="json")
    old_config_file = plugins.conf_file
    plugins.conf_file = config_file
    plugins.config.set_defaults()
    yield plugins
    plugins.conf_file = old_config_file
def test_bogus_plugin_autoremove(plugins):
    """
    Checks that a plugin is auto-removed when it cannot be imported
    """
    bogus = "giraffe.horse"
    plugins.config["INSTALLED_PLUGINS"].add(bogus)
    plugins.config.save()
    autoremove_unavailable_plugins()
    assert bogus not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_autoremove_no_path(plugins):
    """
    Checks that a plugin without a dotted path is also auto-removed
    """
    bogus = "giraffehorse"
    plugins.config["INSTALLED_PLUGINS"].add(bogus)
    plugins.config.save()
    autoremove_unavailable_plugins()
    assert bogus not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
    """Disabling a nonexistent plugin leaves the configuration untouched."""
    installed_before = plugins.config["INSTALLED_PLUGINS"].copy()
    disabled_before = plugins.config["DISABLED_PLUGINS"].copy()
    try:
        cli.disable.callback(("i_do_not_exist",), False)
    except Exception:
        pass
    assert plugins.config["INSTALLED_PLUGINS"] == installed_before
    assert plugins.config["DISABLED_PLUGINS"] == disabled_before
def test_plugin_cannot_be_imported_disable(plugins):
    """
    A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or
    uninstalled; disabling it should remove it without adding it to the
    disabled list.
    """
    broken = "giraffe.horse"
    plugins.config["INSTALLED_PLUGINS"].add(broken)
    plugins.config.save()
    try:
        cli.disable.callback((broken,), False)
    except Exception:
        pass
    assert broken not in plugins.config["INSTALLED_PLUGINS"]
    # We also don't want to endlessly add cruft to the disabled apps
    assert broken not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
    """A real installed plugin moves from INSTALLED to DISABLED."""
    # Because RIP example plugin
    target = "kolibri.plugins.media_player"
    assert target in plugins.config["INSTALLED_PLUGINS"]
    cli.disable.callback((target,), False)
    assert target not in plugins.config["INSTALLED_PLUGINS"]
    assert target in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
    """Disabling an already-disabled plugin is idempotent."""
    target = "kolibri.plugins.media_player"
    assert target in plugins.config["INSTALLED_PLUGINS"]
    for _ in range(2):
        cli.disable.callback((target,), False)
        assert target not in plugins.config.ACTIVE_PLUGINS
        assert target not in plugins.config["INSTALLED_PLUGINS"]
        assert target in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
    """
    Expected behavior is that nothing blows up with exceptions, user just gets
    a warning and nothing is enabled or changed in the configuration.
    """
    # For fun, we pass in a system library
    installed_before = plugins.config["INSTALLED_PLUGINS"].copy()
    try:
        cli.enable.callback(("os.path",), False)
    except Exception:
        pass
    assert plugins.config["INSTALLED_PLUGINS"] == installed_before
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
    """
    Starts and stops the server, mocking the actual server.start()
    Checks that the correct fallback port is used from the environment.
    """

    with patch("django.core.management.call_command"), patch(
        "kolibri.utils.server.start"
    ) as start:
        from kolibri.utils import server

        def start_mock(port, *args, **kwargs):
            # The mocked server must receive the env-configured port, and we
            # remove the startup lock so stop() sees a cleanly started server.
            assert port == test_port
            try:
                os.remove(server.STARTUP_LOCK)
            except OSError:
                pass

        activate_log_logger(monkeypatch)
        start.side_effect = start_mock

        test_port = 1234
        os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)

        # force a reload of plugins.OPTIONS so the environment variable will be read in
        from kolibri.utils import conf

        conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))

        cli.start.callback(test_port, False)

        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.code == 0

        # Stop the server AGAIN, asserting that we can call the stop command
        # on an already stopped server and will be gracefully informed about
        # it.
        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.code == 0
        assert "Already stopped" in LOG_LOGGER[-1][1]

        def status_starting_up():
            raise server.NotRunning(server.STATUS_STARTING_UP)

        # Ensure that if a server is reported to be 'starting up', it doesn't
        # get killed while doing that.
        monkeypatch.setattr(server, "get_status", status_starting_up)
        with pytest.raises(SystemExit) as excinfo:
            cli.stop.callback()
        assert excinfo.code == server.STATUS_STARTING_UP
        assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="")
@patch("kolibri.utils.cli.update")
@patch("kolibri.utils.cli.plugin.callback")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_first_run(dbbackup, plugin, update, get_version):
    """
    Tests that the first_run() function performs as expected.

    With no stored version (get_version -> ""), initialize() runs update()
    but never backs up a database that doesn't exist yet.
    Note: @patch decorators apply bottom-up, so mock arguments arrive in
    reverse decorator order.
    """
    cli.initialize()
    update.assert_called_once()
    dbbackup.assert_not_called()

    # Check that it got called for each default plugin
    from kolibri import plugins

    assert set(plugins.config["INSTALLED_PLUGINS"]) == set(plugins.DEFAULT_PLUGINS)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
@patch("kolibri.utils.cli.update")
def test_update(update, get_version):
    """
    Tests that update() function performs as expected: a stored version that
    differs from the current one triggers exactly one update() call.
    """
    cli.initialize()
    update.assert_called_once()
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
def test_update_exits_if_running(get_version):
    """
    Tests that update() function performs as expected when the server is up:
    initialize() must bail out via SystemExit rather than migrate underneath
    a running Kolibri. (get_status is patched to report a running server.)
    """
    with patch("kolibri.utils.cli.server.get_status"):
        try:
            cli.initialize()
            pytest.fail("Update did not exit when Kolibri was already running")
        except SystemExit:
            pass
@pytest.mark.django_db
def test_version_updated():
    """
    Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes
    """
    assert cli.version_updated("0.10.0", "0.10.1")
    assert not cli.version_updated("0.10.0", "0.10.0")
    # Any -dev version on either side suppresses the backup.
    dev_cases = [
        ("0.10.0-dev0", ""),
        ("0.10.0-dev0", "0.10.0"),
        ("0.10.0", "0.10.0-dev0"),
        ("0.10.0-dev0", "0.10.0-dev0"),
    ]
    for old_version, new_version in dev_cases:
        assert not cli.should_back_up(old_version, new_version)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value=kolibri.__version__)
@patch("kolibri.utils.cli.update")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_update_no_version_change(dbbackup, update, get_version):
    """
    Tests that when the version doesn't change, we are not doing things we
    shouldn't: neither update() nor a database backup may run.
    """
    cli.initialize()
    update.assert_not_called()
    dbbackup.assert_not_called()
def test_cli_usage():
    """Both -h and --version exit cleanly with status code 0."""
    for flag in ("-h", "--version"):
        with pytest.raises(SystemExit) as excinfo:
            cli.main(flag)
        assert excinfo.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
    """
    Runs `kolibri plugin list` and inspects what click.echo printed.

    NOTE(review): the trailing any(...) expression's result is discarded, so
    nothing is actually asserted here — this test only verifies list() does
    not raise. Wrap the any(...) in an assert to make the check real.
    """
    cli.list.callback()
    test_plugin = "kolibri.plugins.media_player"
    any(
        map(
            lambda x: test_plugin in x[0] and "ENABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins_disabled(echo_mock, plugins):
    """
    Runs `kolibri plugin list`, then disables media_player.

    NOTE(review): as in test_list_plugins, the any(...) result is discarded so
    nothing is asserted. Also note list() runs BEFORE disable(), so the echoed
    output being inspected predates the disable — likely the calls should be
    reordered and the any(...) asserted.
    """
    cli.list.callback()
    test_plugin = "kolibri.plugins.media_player"
    cli.disable.callback((test_plugin,), False)
    any(
        map(
            lambda x: test_plugin in x[0] and "DISABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
@patch("kolibri.utils.cli._migrate_databases")
@patch("kolibri.utils.cli.version_updated")
def test_migrate_if_unmigrated(version_updated, _migrate_databases):
    """
    If morango cannot read its InstanceIDModel (OperationalError, i.e. the
    database is unmigrated), initialize() must still run migrations even
    though version_updated reports no version change.
    """
    # No matter what, ensure that version_updated returns False
    version_updated.return_value = False
    from morango.models import InstanceIDModel

    with patch.object(
        InstanceIDModel, "get_or_create_current_instance"
    ) as get_or_create_current_instance:
        get_or_create_current_instance.side_effect = OperationalError("Test")
        cli.initialize()
        _migrate_databases.assert_called_once()
| {
"content_hash": "be380580f7f9366bfa7d866a028c7b26",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 96,
"avg_line_length": 32.684375,
"alnum_prop": 0.6795104694521464,
"repo_name": "mrpau/kolibri",
"id": "f70543b92bf5c3f32227c1e22d912116c437eda4",
"size": "10459",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/utils/tests/test_cli.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
} |
from google.cloud import ids_v1
async def sample_get_endpoint():
    """Fetch one Cloud IDS endpoint by resource name and print it."""
    # Build the async client (uses application-default credentials).
    ids_client = ids_v1.IDSAsyncClient()

    # "name" is the full endpoint resource name; placeholder value here.
    get_request = ids_v1.GetEndpointRequest(name="name_value")

    endpoint = await ids_client.get_endpoint(request=get_request)
    print(endpoint)
# [END ids_v1_generated_IDS_GetEndpoint_async]
| {
"content_hash": "ced931904a6aa0feda499f516d49f2e8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 57,
"avg_line_length": 21.842105263157894,
"alnum_prop": 0.6795180722891566,
"repo_name": "googleapis/python-ids",
"id": "48b6685d0325d6b7f5c4c4af670801e7e2178dc8",
"size": "1779",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/ids_v1_generated_ids_get_endpoint_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "242228"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ChaosContextMapItem(Model):
    """Describes an item in the ChaosContextMap in ChaosParameters.

    :param key: The key for a ChaosContextMapItem.
    :type key: str
    :param value: The value for a ChaosContextMapItem.
    :type value: str
    """

    _validation = {
        'key': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'key': {'key': 'Key', 'type': 'str'},
        'value': {'key': 'Value', 'type': 'str'},
    }

    def __init__(self, key, value):
        # Initialize msrest Model machinery before setting our own
        # attributes; the original skipped this call entirely.
        super(ChaosContextMapItem, self).__init__()
        self.key = key
        self.value = value
| {
"content_hash": "06961b9aa7021551209a9e3b5ba6ce58",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.564935064935065,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "435baa331bf12227f2dd481100ee6c5797b6f5f2",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
from __future__ import division
'''
FaceSync Class
==========================================
Class to sync videos by audio matching.
'''
__all__ = ['facesync']
__author__ = ["Jin Hyun Cheong"]
__license__ = "MIT"
import os
import numpy as np
import subprocess
import scipy.io.wavfile as wav
def _get_vid_resolution(vidFile):
    """ Gets video resolution for a given file using ffprobe.

    Probes the first video stream and parses the two
    ``streams_stream_0_<dim>=<pixels>`` lines of ffprobe's flat output.

    Args:
        vidFile: path to the video file.

    Returns:
        Tuple of two ints in the order ffprobe emits them
        (width, height for a typical stream — confirm for exotic inputs).

    Raises:
        subprocess.CalledProcessError: if ffprobe exits with an error.
    """
    cmd = [
        'ffprobe', '-v', 'error', '-of', 'flat=s=_', '-select_streams', 'v:0',
        '-show_entries', 'stream=height,width', vidFile,
    ]
    # universal_newlines=True decodes the pipe to str, so splitting on '\n'
    # also works on Python 3 (where pipes yield bytes by default).
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate()
    if proc.returncode:
        raise subprocess.CalledProcessError(proc.returncode, cmd)
    lines = out.split('\n')[:2]
    return tuple(int(line.split('=')[-1]) for line in lines)
def write_offset_to_file(afile, offset, header='offset'):
    '''
    Helper function to write offset output to file.

    Appends a header line followed by the offset value to a .txt file that
    lives next to `afile` and shares its basename stem
    (e.g. /p/clip.wav -> /p/clip.txt).

    Args:
        afile: path whose directory and stem determine the output file.
        offset: offset value to record (written via str()).
        header: label line written before the offset value.
    '''
    (path2fname, fname) = os.path.split(afile)
    outname = os.path.join(path2fname, fname.split(".")[0] + '.txt')
    # Context manager guarantees the handle is closed even if a write fails
    # (the original left the file open on error).
    with open(outname, 'a+') as f:
        f.write(header + '\n')
        f.write(str(offset) + '\n')
def processInput(rate0,data0,afile,fps,length,search_start,search_end,verbose):
    '''
    Helper function for multiprocessing.

    NOTE(review): this function appears broken/dead as written:
      - `self.offsets.append(...)` references `self`, which is undefined in
        a free function -> NameError at runtime.
      - `rs.append(rs)` appends the list to itself instead of accumulating
        correlation values, and `calc_rs` below is never called.
      - the correlation search loop is commented out, so `rs` stays
        effectively empty and np.argmax over it cannot work as intended.
    It needs a rewrite against the intended algorithm before use.
    '''
    if verbose:
        print(afile)
    rate1,data1 = wav.read(afile)
    assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
    searchtime = search_end-search_start # seconds to search alignment
    # Reduce stereo audio to its first channel.
    if np.ndim(data0)>1:
        data0 = data0[:,0]
    if np.ndim(data1)>1:
        data1 = data1[:,0]
    to_compare = data0[0:rate0*length]
    try:
        assert(data1.shape[0] - (searchtime+length)*rate0 >= 0)
    except:
        print("Original length need to be shorter or reduce searchtime to allow buffer at end.")
    rs = []
    ts = []
    # for i in np.linspace(0,searchtime,fps*searchtime):
    inputs = list(np.linspace(search_start,search_end,fps*searchtime))
    ts = inputs
    rs.append(rs)
    # offset_r = ts[np.argmax(rs)] + search_start
    offset_r = ts[np.argmax(rs)]
    self.offsets.append(offset_r)
    write_offset_to_file(afile, offset_r,header='corr_multi')
    return rs,offset_r
def calc_rs(i, to_compare, sample):
    """Return (Pearson correlation between to_compare and sample, i).

    Args:
        i: identifier passed through so parallel callers can match results.
        to_compare: 1-D array of audio samples.
        sample: 1-D array of the same length.

    Raises:
        ValueError: if the arrays differ in length. (The original printed
        a message and then crashed with UnboundLocalError on `r`.)
    """
    if to_compare.shape[0] != sample.shape[0]:
        raise ValueError("Shape mismatch at %s" % str(i))
    r = np.corrcoef(to_compare, sample)[0][1]
    return r, i
class facesync(object):
    """
    facesync represents a collection of videos so that they can be aligned
    to a common target based on their audio tracks.

    Args:
        video_files: list of video filenames to process
        audio_files: list of audio filenames (extracted from the videos)
        target_audio: audio file to which the other audio/video files
            will be aligned
        offsets: list of offsets (in seconds) used to trim video_files;
            must be the same length as video_files when both are given

    Requires ``ffmpeg``/``ffprobe`` on the PATH for the video methods.
    """
    def __init__(self, video_files=None, audio_files=None, target_audio = None, offsets=None,**kwargs):
        '''
        Args:
            video_files: list of video filenames to process
            audio_files: list of audio filenames to process
            target_audio: audio to which videos will be aligned
            offsets: list of offsets to trim the video_files
        '''
        # Initialize attributes
        self.video_files = video_files
        self.audio_files = audio_files
        self.target_audio = target_audio
        self.offsets = offsets
        if self.video_files is not None:
            assert(isinstance(self.video_files,list)),'Place path to files in a list'
        if self.audio_files is not None:
            assert(isinstance(self.audio_files,list)),'Place path to files in a list'
        if (self.video_files is not None) & (self.offsets is not None):
            assert(len(self.video_files)==len(self.offsets)),'Number of videos and number of offsets should match'
    def extract_audio(self,rate=44100,call=True,verbose=True):
        '''
        This method extracts audio from video files in self.video_files and saves audio files in self.audio_files
        Input
        ------------
        rate: rate of audio stream frequency to be extracted, default 44100
        call: boolean, whether to wait for each process to finish or open multiple threads
        verbose: if True, prints the currently processing audio filename
        '''
        assert(len(self.video_files)!=0),'No video files to process'
        self.audio_files = []
        for i, vidfile in enumerate(self.video_files):
            if verbose:
                print(vidfile)
            (path2fname, vname) = os.path.split(vidfile)
            # Output .wav shares the video basename (text before the first ".").
            aname = vname.split(".")[0] + ".wav"
            infile = os.path.join(path2fname,vname)
            outfile = os.path.join(path2fname,aname)
            self.audio_files.append(outfile)
            # cmd = ' '.join(["avconv", "-i", infile, "-y", "-vn", "-ac", "1","-ar",str(rate),"-f", "wav", outfile])
            # NOTE(review): shell=True with unquoted, concatenated paths breaks
            # on filenames containing spaces or shell metacharacters.
            command = "ffmpeg -y -i " + infile + " -ab 128k -ac 2 -ar " +str(rate) +" -vn " + outfile
            if call:
                subprocess.call(command, shell=True)
            else:
                subprocess.Popen(command, shell=True)
    def find_offset_cross(self,length = 10,search_start=0,verbose=True):
        '''
        Find offset using Fourier Transform cross correlation.
        Input
        ------------
        length: seconds to use for the cross correlation matching, default is 10 seconds
        verbose: if True, prints the currently processing audio filename
        Output
        ------------
        allrs : list of cross correlation results using fftconvolve. to retrieve the offset time need to zero index and subtract argmax.
        '''
        import numpy as np
        from scipy.signal import fftconvolve
        assert(self.target_audio is not None), 'Target audio not specified'
        assert(self.audio_files is not None), 'Audio files not specified'
        self.offsets = []
        rate0,data0 = wav.read(self.target_audio)
        allrs = []
        for i, afile in enumerate(self.audio_files):
            if verbose:
                print(afile)
            rate1,data1 = wav.read(afile)
            assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
            # Take first audio channel
            if np.ndim(data0)>1:
                data0 = data0[:,0]
            if np.ndim(data1)>1:
                data1 = data1[:,0]
            x = data0[:rate0*length] # target audio
            y = data1[int(search_start*rate0):int(search_start*rate0)+rate0*length] # change sample audio location
            # Pad target audio with zeros if not same length.
            if len(x) < len(y):
                xnew = np.zeros_like(y)
                xnew[:len(x)] = x
                x = xnew
            assert(len(x)==len(y)), "Length of two samples must be the same"
            # Cross-correlate by convolving with the time-reversed sample;
            # the offset is how far the peak sits from the zero-lag index.
            crosscorr = fftconvolve(x,y[::-1],'full')
            zero_index = int(len(crosscorr) / 2 ) -1
            offset_x = search_start+(zero_index - np.argmax(crosscorr))/float(rate0)
            # assert(len(crosscorr)==len(x))
            self.offsets.append(offset_x)
            write_offset_to_file(afile, offset_x,header='xcorr_len'+str(length))
            allrs.append(crosscorr)
        return allrs
    def find_offset_corr(self,length=5,search_start=0,search_end=20,fps=44100,verbose=True):
        '''
        Find offset based on correlation of two audio.
        Input
        ------------
        self.target_audio : Original audio to which other files will be aligned to
        self.audio_files : List of audio files that needs to be trimmed
        length : length of original sample to compare
        search_start, search_end: start and end times to search for alignment in seconds
        fps: level of temporal precision, default 44100
        verbose: if True, prints the currently processing audio filename
        Output
        ------------
        rs: correlation values
        '''
        assert(self.target_audio is not None), 'Target audio not specified'
        assert(self.audio_files is not None), 'Audio files not specified'
        self.offsets = []
        allrs = []
        rate0,data0 = wav.read(self.target_audio)
        for i, afile in enumerate(self.audio_files):
            if verbose:
                print(afile)
            rate1,data1 = wav.read(afile)
            assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
            searchtime = search_end-search_start # seconds to search alignment
            # Use only the first channel of multi-channel audio.
            if np.ndim(data0)>1:
                data0 = data0[:,0]
            if np.ndim(data1)>1:
                data1 = data1[:,0]
            to_compare = data0[0:rate0*length]
            try:
                assert(data1.shape[0] - (searchtime+length)*rate0 >= 0)
            except:
                print("Original length need to be shorter or reduce searchtime to allow buffer at end.")
            rs = []
            ts = []
            # Slide a length-second window across the search range and record
            # the Pearson correlation at each candidate offset.
            # for i in np.linspace(0,searchtime,fps*searchtime):
            for i in np.linspace(search_start,search_end,fps*searchtime):
                sample = data1[int(rate0*i):int(rate0*(i+length))][0:to_compare.shape[0]]
                try:
                    assert(to_compare.shape[0]==sample.shape[0])
                except:
                    print("Shape mismatch at %s" %str(i))
                try:
                    rs.append(np.corrcoef(to_compare,sample)[0][1])
                    ts.append(i)
                except:
                    pass
            allrs.append(rs)
            # offset_r = ts[np.argmax(rs)] + search_start
            offset_r = ts[np.argmax(rs)]
            self.offsets.append(offset_r)
            write_offset_to_file(afile, offset_r,header='corr_fps'+str(fps)+'_len'+str(length)+'_start'+str(search_start)+'_end'+str(search_end))
        return allrs
    def find_offset_corr_sparse(self,length=5,search_start=0,search_end=20,fps=44100,sparse_ratio=.5,verbose=True):
        '''
        Finds offset by correlation with sparse sampling.
        Input
        ------------
        self.target_audio : Original audio to which other files will be aligned to
        self.audio_files : List of audio files that needs to be trimmed
        length : length of original sample to compare
        search_start, search_end: start and end times to search for alignment in seconds
        fps: level of temporal precision, default 44100
        sparse_ratio = Determines the sparse sampling of the target audio to match (default is .5)
        verbose: if True, prints the currently processing audio filename
        Output
        ------------
        offset_r : time to trim based on correlation
        offset_d : time to trim based on distance
        rs: correlation values
        ds: difference values
        '''
        assert(self.target_audio is not None), 'Target audio not specified'
        assert(self.audio_files is not None), 'Audio files not specified'
        self.offsets = []
        allrs = []
        rate0,data0 = wav.read(self.target_audio)
        for i, afile in enumerate(self.audio_files):
            if verbose:
                print(afile)
            rate1,data1 = wav.read(afile)
            assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
            searchtime = search_end-search_start # seconds to search alignment
            if np.ndim(data0)>1:
                data0 = data0[:,0]
            if np.ndim(data1)>1:
                data1 = data1[:,0]
            # to_compare = data0[0:rate0*length]
            # Correlate only a random (but sorted) subset of sample indices,
            # trading a little accuracy for speed proportional to sparse_ratio.
            sampleix = list(range(0,int(rate0*length)-1))
            np.random.shuffle(sampleix)
            sampleix = np.sort(sampleix[0:int(rate0*length*sparse_ratio)])
            to_compare = data0[sampleix]
            try:
                assert(data1.shape[0] - (searchtime+length)*rate0 >= 0)
            except:
                print("Original length need to be shorter or reduce searchtime to allow buffer at end.")
            rs = []
            ts = []
            # for i in np.linspace(0,searchtime,fps*searchtime):
            for i in np.linspace(search_start,search_end,fps*searchtime):
                # sample = data1[int(rate0*i):int(rate0*(i+length))][0:to_compare.shape[0]]
                sample = data1[int(rate0*i):int(rate0*(i+length))][sampleix]
                try:
                    assert(to_compare.shape[0]==sample.shape[0])
                except:
                    print("Shape mismatch at %s" %str(i))
                try:
                    rs.append(np.corrcoef(to_compare,sample)[0][1])
                    ts.append(i)
                except:
                    pass
            allrs.append(rs)
            # offset_r = ts[np.argmax(rs)] + search_start
            offset_r = ts[np.argmax(rs)]
            self.offsets.append(offset_r)
            write_offset_to_file(afile, offset_r, header='corr_sparse_fps'+str(fps)+'_len'+str(length)+'_start'+str(search_start)+'_end'+str(search_end))
        return allrs
    def find_offset_corr_multi(self,length=5,search_start=0,search_end=20,fps=44100,verbose=True):
        '''
        Find offset based on correlation with multiprocessing.
        Requires joblib package.
        Input
        ------------
        self.target_audio : Original audio to which other files will be aligned to
        self.audio_files : List of audio files that needs to be trimmed
        length : length of original sample to compare
        search_start, search_end: start and end times to search for alignment in seconds
        fps: level of temporal precision, default 44100
        verbose: if True, prints the currently processing audio filename
        Output
        ------------
        self.offsets: max offsets
        rs: correlation values
        '''
        from joblib import Parallel, delayed
        import multiprocessing
        num_cores = multiprocessing.cpu_count()-1 # don't use all cores
        assert(self.target_audio is not None), 'Target audio not specified'
        assert(self.audio_files is not None), 'Audio files not specified'
        self.offsets = []
        allrs = []
        rate0,data0 = wav.read(self.target_audio)
        for i, afile in enumerate(self.audio_files):
            if verbose:
                print(afile)
            rate1,data1 = wav.read(afile)
            assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
            searchtime = search_end-search_start # seconds to search alignment
            if np.ndim(data0)>1:
                data0 = data0[:,0]
            if np.ndim(data1)>1:
                data1 = data1[:,0]
            to_compare = data0[0:rate0*length]
            try:
                assert(data1.shape[0] - (searchtime+length)*rate0 >= 0)
            except:
                print("Original length need to be shorter or reduce searchtime to allow buffer at end.")
            rs = []
            ts = []
            # Fan the per-offset correlations (module-level calc_rs) out over
            # threads; calc_rs returns (r, i) pairs that are unzipped below.
            out = Parallel(n_jobs=num_cores,backend='threading')(delayed(calc_rs)(i,to_compare,data1[int(rate0*i):int(rate0*(i+length))][0:to_compare.shape[0]]) for i in np.linspace(search_start,search_end,fps*searchtime))
            rs,ts= zip(*out)
            allrs.append(rs)
            offset_r = ts[np.argmax(rs)]
            self.offsets.append(offset_r)
            write_offset_to_file(afile, offset_r,header='corr_fps'+str(fps)+'_len'+str(length)+'_start'+str(search_start)+'_end'+str(search_end))
        return allrs
    def find_offset_dist(self,length=5,search_start=0,search_end=20,fps=44100,verbose=True):
        '''
        Find offset based on squared distance of audio wave.
        Input
        ------------
        self.target_audio : Original audio to which other files will be aligned to
        self.audio_files : List of audio files that needs to be trimmed
        length : length of original sample to compare
        search_start, search_end: start and end times to search for alignment in seconds
        fps: level of temporal precision, default 44100
        verbose: if True, prints the currently processing audio filename
        Output
        ------------
        offset_d : time to trim based on distance
        rs: correlation values
        ds: difference values
        '''
        assert(self.target_audio is not None), 'Target audio not specified'
        assert(self.audio_files is not None), 'Audio files not specified'
        self.offsets = []
        allds = []
        rate0,data0 = wav.read(self.target_audio)
        for i, afile in enumerate(self.audio_files):
            if verbose:
                print(afile)
            rate1,data1 = wav.read(afile)
            assert(rate0==rate1), "Audio sampling rate is not the same for target and sample" # Check if they have same rate
            searchtime = search_end-search_start # seconds to search alignment
            if np.ndim(data0)>1:
                data0 = data0[:,0]
            if np.ndim(data1)>1:
                data1 = data1[:,0]
            to_compare = data0[0:rate0*length]
            try:
                assert(data1.shape[0] - (searchtime+length)*rate0 >= 0)
            except:
                print("Original length need to be shorter or reduce searchtime to allow buffer at end.")
            ds = []
            ts = []
            # Same sliding-window search as find_offset_corr, but minimizing
            # the sum of squared differences instead of maximizing correlation.
            # for i in np.linspace(0,searchtime,fps*searchtime):
            for i in np.linspace(search_start,search_end,fps*searchtime):
                sample = data1[int(rate0*i):int(rate0*(i+length))][0:to_compare.shape[0]]
                try:
                    assert(to_compare.shape[0]==sample.shape[0])
                except:
                    print("Shape mismatch at %s" %str(i))
                try:
                    ds.append(sum((to_compare-sample)**2))
                    ts.append(i)
                except:
                    pass
            allds.append(ds)
            # offset_d = ts[np.argmin(ds)] + search_start
            offset_d = ts[np.argmin(ds)]
            self.offsets.append(offset_d)
            write_offset_to_file(afile, offset_d,header='dist_fps'+str(fps)+'_len'+str(length)+'_start'+str(search_start)+'_end'+str(search_end))
        return allds
    def resize_vids(self, resolution = 64, suffix = None,call = True, force=False):
        '''
        Resize videos.
        Inputs
        ------------
        resolution: height of the video
        suffix: what to name the resized video. If not specified, will append video names with resolution
        call: boolean, whether to wait for each process to finish or open multiple threads,
        True: call, False: multithread, default is call
        force: whether to force creating new files some video files are already at the desired resolution; defaults to False
        Returns
        ------------
        out: list of output filenames (originals for videos already at the
        target height unless force is set)
        '''
        if suffix == None:
            suffix = str(resolution)
        out = []
        for vidfile in self.video_files:
            (path2fname, vname) = os.path.split(vidfile)
            print("Resizing video: %s" % (vname))
            current_resolution = _get_vid_resolution(vidfile)
            # current_resolution[1] is the height reported by ffprobe.
            if current_resolution[1] == resolution and not force:
                print("Native resolution already ok, skipping: %s" % (vname))
                final_vidname = os.path.join(path2fname,vname)
                out.append(final_vidname)
                continue
            else:
                final_vidname = os.path.join(path2fname,vname.split('.')[0]+'_'+suffix+'.'+vname.split('.')[-1])
                out.append(final_vidname)
                # scale=-1:h preserves aspect ratio while fixing the height.
                command = 'ffmpeg -y -i ' + vidfile + ' -vf scale=-1:'+str(resolution)+' '+final_vidname
                if not os.path.exists(final_vidname):
                    if call:
                        subprocess.call(command, shell=True)
                    else:
                        subprocess.Popen(command, shell=True)
        return out
    def concat_vids(self, final_vidname = None, resolution_fix=False, checkres=True):
        '''
        Concatenate list of videos to one video.
        Inputs
        ------------
        final_vidname = Filepath/filname of the concatenated video. If not specified will use the first video name appended with _all
        resolution_fix: if True, automatically resize mismatched videos down to the smallest height before joining
        checkres: if True, verify all videos share one resolution before joining
        '''
        assert(len(self.video_files)!=0),'No video files to process'
        if (final_vidname != None):
            self.final_vidname = final_vidname
        if (len(self.video_files)!=0) and (final_vidname == None):
            (path2fname, vname) = os.path.split(self.video_files[0])
            self.final_vidname = os.path.join(path2fname,vname.split('.')[0]+'_all.'+vname.split('.')[-1])
        assert(type(self.final_vidname)==str),'final_vidname must be a string with full path'
        #Check that files are all of the same resolution
        if checkres:
            resolutions = [_get_vid_resolution(elem) for elem in self.video_files]
            if len(set(resolutions)) > 1:
                if resolution_fix:
                    min_resolution = min([elem[1] for elem in resolutions])
                    print("Videos mismatch in resolution, resizing to: %s..." % (min_resolution))
                    new_vids= self.resize_vids(resolution=min_resolution)
                    self.video_files = new_vids
                    resolutions = [_get_vid_resolution(elem) for elem in self.video_files]
                    assert(len(set(resolutions))<=1),"Videos still mismatched. Something went wrong with automatic resizing? Try resizing manually."
                    print("Resizing complete. Continuing.")
                else:
                    raise TypeError("Video files have different resolutions!")
        # Create intermediate video files
        # Each input is remuxed to an MPEG-TS stream so the concat: protocol
        # can join them without re-encoding.
        tempfiles = str();
        for i, vidfile in enumerate(self.video_files):
            (path2fname, vname) = os.path.split(vidfile)
            print("Joining video: %s" % (vname))
            if len(tempfiles)!=0:
                tempfiles = tempfiles+"|"
            intermediatefile = os.path.join(path2fname,"intermediate"+str(i)+'.ts')
            if not os.path.exists(intermediatefile):
                command = "ffmpeg -i "+ vidfile +" -c copy -bsf:v h264_mp4toannexb -f mpegts " + intermediatefile
                subprocess.call(command, shell=True)
            tempfiles = tempfiles + intermediatefile
        # Concatenate videos
        command = 'ffmpeg -y -i "concat:' + tempfiles + '" -c copy -bsf:a aac_adtstoasc '+ self.final_vidname
        subprocess.call(command, shell=True)
        #remove intermediates
        for i, vidfile in enumerate(self.video_files):
            (path2fname, vname) = os.path.split(vidfile)
            intermediatefile = os.path.join(path2fname,"intermediate"+str(i)+'.ts')
            command = "rm -f " + intermediatefile
            subprocess.call(command, shell=True)
    def trim_vids(self,offsets = None, suffix = None,call=True):
        '''
        Trims video based on offset
        Inputs
        ------------
        offsets: list of offsets to trim the self.video_files with
        length of offsets should match length of self.video_files
        suffix: string to add to end of the trimmed video, default: 'trimmed'
        call: boolean, whether to wait for each process to finish or open multiple threads,
        True: call, False: multithread, default is call
        '''
        if suffix == None:
            suffix = 'trimmed'
        if offsets is not None:
            self.offsets= offsets
        assert(len(self.video_files)==len(self.offsets)),'Number of videos and number of offsets should match'
        for i,vidfile in enumerate(self.video_files):
            seconds = str(self.offsets[i])
            (path2fname, vname) = os.path.split(vidfile)
            print("Trimming video: %s" % (vname))
            final_vidname = os.path.join(path2fname,vname.split('.')[0]+'_'+suffix+'.'+vname.split('.')[-1])
            # -ss after -i seeks accurately (decode-and-discard) but re-encodes;
            # the commented variants traded accuracy for speed via stream copy.
            # command = 'ffmpeg -y -ss ' + str(seconds) + ' -i ' + vidfile + ' -c copy ' + final_vidname
            # command = 'ffmpeg -y -ss ' + seconds.split('.')[0] + ' -i ' + vidfile + ' -ss 00:00:00.' + seconds.split('.')[1] + ' -c copy ' + final_vidname
            command = 'ffmpeg -y -i ' + vidfile + ' -ss ' + str(seconds) + ' -crf 23 ' + final_vidname
            # command = 'ffmpeg -y -i ' + vidfile + ' -ss ' + str(seconds) + ' -vcodec libx264 -crf 23 -acodec copy ' + final_vidname
            if call:
                subprocess.call(command, shell=True)
            else:
                subprocess.Popen(command, shell=True)
| {
"content_hash": "371723d930c2311381b9176a5177bfed",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 222,
"avg_line_length": 45.34249084249084,
"alnum_prop": 0.5789069758048229,
"repo_name": "jcheong0428/facesync",
"id": "aeb2a5a898399bb9f6529176d71b1ef79b8b3352",
"size": "24757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "facesync/facesync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45283"
}
],
"symlink_target": ""
} |
import os
import six
from django import VERSION
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlquote
from queued_storage.conf import settings
from queued_storage.utils import import_attribute
if VERSION[1] >= 7:
from django.utils.deconstruct import deconstructible
class LazyBackend(SimpleLazyObject):
    """
    A :class:`~django.utils.functional.SimpleLazyObject` wrapper that
    resolves a dotted import path to a storage class immediately, but
    defers instantiating it (with the given options) until first access.
    """
    def __init__(self, import_path, options):
        backend_cls = import_attribute(import_path)
        super(LazyBackend, self).__init__(lambda: backend_cls(**options))
class QueuedStorage(object):
    """
    Base class for queued storages. You can use this to specify your own
    backends.
    :param local: local storage class to transfer from
    :type local: str
    :param local_options: options of the local storage class
    :type local_options: dict
    :param remote: remote storage class to transfer to
    :type remote: str
    :param remote_options: options of the remote storage class
    :type remote_options: dict
    :param cache_prefix: prefix to use in the cache key
    :type cache_prefix: str
    :param delayed: whether the transfer task should be executed automatically
    :type delayed: bool
    :param task: Celery task to use for the transfer
    :type task: str
    """
    #: The local storage class to use. A dotted path (e.g.
    #: ``'django.core.files.storage.FileSystemStorage'``).
    local = None
    #: The options of the local storage class, defined as a dictionary.
    local_options = None
    #: The remote storage class to use. A dotted path (e.g.
    #: ``'django.core.files.storage.FileSystemStorage'``).
    remote = None
    #: The options of the remote storage class, defined as a dictionary.
    remote_options = None
    #: The Celery task class to use to transfer files from the local
    #: to the remote storage. A dotted path (e.g.
    #: ``'queued_storage.tasks.Transfer'``).
    task = 'queued_storage.tasks.Transfer'
    #: If set to ``True`` the backend will *not* transfer files to the remote
    #: location automatically, but instead requires manual intervention by the
    #: user with the :meth:`~queued_storage.backends.QueuedStorage.transfer`
    #: method.
    delayed = False
    #: The cache key prefix to use when saving the which storage backend
    #: to use, local or remote (default see
    #: :attr:`~queued_storage.conf.settings.QUEUED_STORAGE_CACHE_PREFIX`)
    cache_prefix = settings.QUEUED_STORAGE_CACHE_PREFIX
    def __init__(self, local=None, remote=None,
                 local_options=None, remote_options=None,
                 cache_prefix=None, delayed=None, task=None):
        # Constructor arguments override the class-level defaults; backends
        # are wrapped in LazyBackend so they are only instantiated on use.
        self.local_path = local or self.local
        self.local_options = local_options or self.local_options or {}
        self.local = self._load_backend(backend=self.local_path,
                                        options=self.local_options)
        self.remote_path = remote or self.remote
        self.remote_options = remote_options or self.remote_options or {}
        self.remote = self._load_backend(backend=self.remote_path,
                                         options=self.remote_options)
        # Using the mis-named _load_backend method to get the task instance from the task name string (both self.task...),
        # using the tricky method import_attribute. Hard to read. Hard to test...
        self.task = self._load_backend(backend=task or self.task,
                                       handler=import_attribute)
        if delayed is not None:
            self.delayed = delayed
        if cache_prefix is not None:
            self.cache_prefix = cache_prefix
    def _load_backend(self, backend=None, options=None, handler=LazyBackend):
        # Validates that `backend` is a dotted import path string, then hands
        # it to `handler` (LazyBackend by default) for resolution.
        if backend is None:  # pragma: no cover
            raise ImproperlyConfigured("The QueuedStorage class '%s' "
                                       "doesn't define a needed backend." % (self))
        if not isinstance(backend, six.string_types):
            raise ImproperlyConfigured("The QueuedStorage class '%s' "
                                       "requires its backends to be "
                                       "specified as dotted import paths "
                                       "not instances or classes" % self)
        return handler(backend, options)
    def get_storage(self, name):
        """
        Returns the storage backend instance responsible for the file
        with the given name (either local or remote). This method is
        used in most of the storage API methods.
        :param name: file name
        :type name: str
        :rtype: :class:`~django:django.core.files.storage.Storage`
        """
        # Cache semantics: True -> file is on the remote; False -> known
        # local (set by save()); None -> unknown, so probe the remote once
        # and memoize a positive result.
        cache_result = cache.get(self.get_cache_key(name))
        if cache_result:
            return self.remote
        elif cache_result is None and self.remote.exists(name):
            cache.set(self.get_cache_key(name), True)
            return self.remote
        else:
            return self.local
    def get_cache_key(self, name):
        """
        Returns the cache key for the given file name.
        :param name: file name
        :type name: str
        :rtype: str
        """
        return '%s_%s' % (self.cache_prefix, urlquote(name))
    def using_local(self, name):
        """
        Determines for the file with the given name whether
        the local storage is current used.
        :param name: file name
        :type name: str
        :rtype: bool
        """
        return self.get_storage(name) is self.local
    def using_remote(self, name):
        """
        Determines for the file with the given name whether
        the remote storage is current used.
        :param name: file name
        :type name: str
        :rtype: bool
        """
        return self.get_storage(name) is self.remote
    def open(self, name, mode='rb'):
        """
        Retrieves the specified file from storage.
        :param name: file name
        :type name: str
        :param mode: mode to open the file with
        :type mode: str
        :rtype: :class:`~django:django.core.files.File`
        """
        return self.get_storage(name).open(name, mode)
    def save(self, name, content, max_length=None):
        """
        Saves the given content with the given name using the local
        storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed`
        attribute is ``True`` this will automatically call the
        :meth:`~queued_storage.backends.QueuedStorage.transfer` method
        queuing the transfer from local to remote storage.
        :param name: file name
        :type name: str
        :param content: content of the file specified by name
        :type content: :class:`~django:django.core.files.File`
        :param max_length: The length of the filename will not exceed
                           `max_length`, if provided.
        :type max_length: int
        :rtype: str
        """
        cache_key = self.get_cache_key(name)
        cache.set(cache_key, False)
        # Use a name that is available on both the local and remote storage
        # systems and save locally.
        # NOTE(review): max_length is forwarded here but get_available_name
        # below ignores it -- confirm filename truncation is handled elsewhere.
        name = self.get_available_name(name, max_length)
        name = self.local.save(name, content)
        # Pass on the cache key to prevent duplicate cache key creation,
        # we save the result in the storage to be able to test for it
        if not self.delayed:
            self.result = self.transfer(name, cache_key=cache_key)
        return name
    def transfer(self, name, cache_key=None):
        """
        Transfers the file with the given name to the remote storage
        backend by queuing the task.
        :param name: file name
        :type name: str
        :param cache_key: the cache key to set after a successful task run
        :type cache_key: str
        :rtype: task result
        """
        if cache_key is None:
            cache_key = self.get_cache_key(name)
        return self.task.delay(name, cache_key)  # , self.local, self.remote
    def get_valid_name(self, name):
        """
        Returns a filename, based on the provided filename, that's suitable
        for use in the current storage system.
        :param name: file name
        :type name: str
        :rtype: str
        """
        return self.get_storage(name).get_valid_name(name)
    def get_available_name(self, name, max_length=None):
        """
        Returns a filename that's free on both the local and remote storage
        systems, and available for new content to be written to.
        :param name: file name
        :type name: str
        :rtype: str
        """
        local_available_name = self.local.get_available_name(name)
        remote_available_name = self.remote.get_available_name(name)
        # NOTE(review): compares the two candidates lexicographically and
        # returns the greater one; with Django's "_N"-suffixing this tends to
        # pick the more-suffixed name, but the result is not re-checked for
        # availability on *both* backends. max_length is also unused here.
        if remote_available_name > local_available_name:
            return remote_available_name
        return local_available_name
    def generate_filename(self, filename):
        """
        Validate the filename by calling get_valid_name() and return a filename
        to be passed to the save() method.
        """
        # `filename` may include a path as returned by FileField.upload_to.
        dirname, filename = os.path.split(filename)
        return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
    def path(self, name):
        """
        Returns a local filesystem path where the file can be retrieved using
        Python's built-in open() function. Storage systems that can't be
        accessed using open() should *not* implement this method.
        :param name: file name
        :type name: str
        :rtype: str
        """
        return self.get_storage(name).path(name)
    def delete(self, name):
        """
        Deletes the specified file from the storage system.
        :param name: file name
        :type name: str
        """
        return self.get_storage(name).delete(name)
    def exists(self, name):
        """
        Returns ``True`` if a file referened by the given name already exists
        in the storage system, or False if the name is available for a new
        file.
        :param name: file name
        :type name: str
        :rtype: bool
        """
        return self.get_storage(name).exists(name)
    def listdir(self, name):
        """
        Lists the contents of the specified path, returning a 2-tuple of lists;
        the first item being directories, the second item being files.
        :param name: file name
        :type name: str
        :rtype: tuple
        """
        return self.get_storage(name).listdir(name)
    def size(self, name):
        """
        Returns the total size, in bytes, of the file specified by name.
        :param name: file name
        :type name: str
        :rtype: int
        """
        return self.get_storage(name).size(name)
    def url(self, name):
        """
        Returns an absolute URL where the file's contents can be accessed
        directly by a Web browser.
        :param name: file name
        :type name: str
        :rtype: str
        """
        return self.get_storage(name).url(name)
    def accessed_time(self, name):
        """
        Returns the last accessed time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        # NOTE(review): this (and created_time/modified_time below) checks
        # only the *minor* version, so Django 2.0+ (VERSION[1] == 0) would
        # take the legacy branch -- confirm the supported Django range.
        if VERSION[1] >= 10:
            return self.get_storage(name).get_accessed_time(name)
        else:
            return self.get_storage(name).accessed_time(name)
    def get_accessed_time(self, name):
        """
        Returns the last accessed time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        return self.accessed_time(name)
    def created_time(self, name):
        """
        Returns the creation time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        if VERSION[1] >= 10:
            return self.get_storage(name).get_created_time(name)
        else:
            return self.get_storage(name).created_time(name)
    def get_created_time(self, name):
        """
        Returns the creation time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        return self.created_time(name)
    def modified_time(self, name):
        """
        Returns the last modified time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        if VERSION[1] >= 10:
            return self.get_storage(name).get_modified_time(name)
        else:
            return self.get_storage(name).modified_time(name)
    def get_modified_time(self, name):
        """
        Returns the last modified time (as datetime object) of the file
        specified by name.
        :param name: file name
        :type name: str
        :rtype: :class:`~python:datetime.datetime`
        """
        return self.modified_time(name)
# Make QueuedStorage serializable for migrations on Django >= 1.7.
# NOTE(review): this guards on the *minor* version only, so Django 2.0+
# (VERSION[1] == 0) would skip it -- confirm the intended Django range.
if VERSION[1] >= 7:
    QueuedStorage = deconstructible(QueuedStorage)
class QueuedFileSystemStorage(QueuedStorage):
    """
    A :class:`~queued_storage.backends.QueuedStorage` subclass that defaults
    the *local* backend to Django's own
    :class:`~django:django.core.files.storage.FileSystemStorage`.
    """
    def __init__(self, local='django.core.files.storage.FileSystemStorage', *args, **kwargs):
        super(QueuedFileSystemStorage, self).__init__(*args, local=local, **kwargs)
class QueuedS3BotoStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``S3Boto3Storage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.s3boto3.S3Boto3Storage', *args, **kwargs):
        super(QueuedS3BotoStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedCouchDBStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``CouchDBStorage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.couchdb.CouchDBStorage', *args, **kwargs):
        super(QueuedCouchDBStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedDatabaseStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``DatabaseStorage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.database.DatabaseStorage', *args, **kwargs):
        super(QueuedDatabaseStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedFTPStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``FTPStorage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.ftp.FTPStorage', *args, **kwargs):
        super(QueuedFTPStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedMogileFSStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``MogileFSStorage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.mogile.MogileFSStorage', *args, **kwargs):
        super(QueuedMogileFSStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedGridFSStorage(QueuedFileSystemStorage):
    """
    A :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote backend defaults to the ``GridFSStorage`` class from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.mongodb.GridFSStorage', *args, **kwargs):
        super(QueuedGridFSStorage, self).__init__(*args, remote=remote, **kwargs)
class QueuedCloudFilesStorage(QueuedFileSystemStorage):
    """
    :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote side defaults to the ``CloudFilesStorage`` backend from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.mosso.CloudFilesStorage', *args, **kwargs):
        # Forward the (possibly overridden) remote backend path as a keyword.
        kwargs['remote'] = remote
        super(QueuedCloudFilesStorage, self).__init__(*args, **kwargs)
class QueuedSFTPStorage(QueuedFileSystemStorage):
    """
    :class:`~queued_storage.backends.QueuedFileSystemStorage` variant whose
    remote side defaults to the ``SFTPStorage`` backend from the
    `django-storages <https://django-storages.readthedocs.io/>`_ app.
    """
    def __init__(self, remote='storages.backends.sftpstorage.SFTPStorage', *args, **kwargs):
        # Forward the (possibly overridden) remote backend path as a keyword.
        kwargs['remote'] = remote
        super(QueuedSFTPStorage, self).__init__(*args, **kwargs)
| {
"content_hash": "94b55439e20217db7c6174d4c617faf0",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 122,
"avg_line_length": 35.20079522862823,
"alnum_prop": 0.6310855077374902,
"repo_name": "melfelr/django-queued-storage",
"id": "b926dd12a6f8d130b82fe55dc7d9c2a8972c3097",
"size": "17706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "queued_storage/backends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "40934"
}
],
"symlink_target": ""
} |
import types
from pyrepl import unicodedata_
from pyrepl import commands
from pyrepl import input
def _make_unctrl_map():
    """Build a mapping from every character in range(256) to a printable
    representation: printable characters map to themselves, control
    characters to caret notation (``^A`` etc.), TAB and DEL to fixed
    spellings, everything else to an octal escape.

    Note: this file targets Python 2 (``unichr``).  The deprecated ``<>``
    operator and ``has_key`` have been replaced with ``!=`` and ``in``,
    which behave identically on Python 2.
    """
    uc_map = {}
    # Anything that is not a Unicode control character prints as itself.
    for c in map(unichr, range(256)):
        if unicodedata_.category(c)[0] != 'C':
            uc_map[c] = c
    # Control chars 0-31 display in caret notation (^@, ^A, ... ^_).
    for i in range(32):
        c = unichr(i)
        uc_map[c] = u'^' + unichr(ord('A') + i - 1)
    uc_map['\t'] = '    ' # display TABs as 4 characters
    uc_map['\177'] = u'^?'
    # Whatever is still unmapped (e.g. C1 controls) gets an octal escape.
    for i in range(256):
        c = unichr(i)
        if c not in uc_map:
            uc_map[c] = u'\\%03o'%i
    return uc_map
# disp_str proved to be a bottleneck for large inputs, so it's been
# rewritten in C; it's not required though.
try:
    # The C accelerator is deliberately disabled for now (see comment);
    # the raise makes us always fall through to the pure-Python version.
    raise ImportError # currently it's borked by the unicode support
    from _pyrepl_utils import disp_str, init_unctrl_map
    init_unctrl_map(_make_unctrl_map())

    del init_unctrl_map
except ImportError:
    def _my_unctrl(c, u=_make_unctrl_map()):
        # The map is bound once as a default argument -- deliberate
        # caching, not the mutable-default pitfall (it is never mutated).
        if c in u:
            return u[c]
        else:
            if unicodedata_.category(c).startswith('C'):
                # Plain (non-unicode) literal: '\u' is a literal backslash-u
                # in Python 2 str, producing e.g. '\u0085' as display text.
                return '\u%04x'%(ord(c),)
            else:
                return c

    def disp_str(buffer, join=''.join, uc=_my_unctrl):
        """ disp_str(buffer:string) -> (string, [int])

        Return the string that should be the printed representation of
        |buffer| and a list detailing where the characters of |buffer|
        get used up.  E.g.:

        >>> disp_str(chr(3))
        ('^C', [1, 0])

        the list always contains 0s or 1s at present; it could conceivably
        go higher as and when unicode support happens."""
        s = map(uc, buffer)
        return (join(s),
                map(ord, join(map(lambda x:'\001'+(len(x)-1)*'\000', s))))

    del _my_unctrl

del _make_unctrl_map
# syntax classes:
# assigned 0, 1, 2 in order; used as values in the per-character syntax
# tables that drive word-boundary motion (see Reader.bow/eow).

[SYNTAX_WHITESPACE,
 SYNTAX_WORD,
 SYNTAX_SYMBOL] = range(3)
def make_default_syntax_table():
    """Build the default character -> syntax-class table for the first
    256 code points: alphabetic characters are words, newline and space
    are whitespace, everything else is a symbol."""
    # XXX perhaps should use some unicodedata here?
    table = {}
    for code in range(256):
        ch = unichr(code)
        if ch.isalpha():
            table[ch] = SYNTAX_WORD
        else:
            table[ch] = SYNTAX_SYMBOL
    table[u'\n'] = SYNTAX_WHITESPACE
    table[u' '] = SYNTAX_WHITESPACE
    return table
# Default key bindings as (key-spec, command-name) pairs, consumed by
# input.KeymapTranslator.  \C- is Control, \M- is Meta/Alt, \<name> are
# named special keys.  The only code change here is `!=` replacing the
# deprecated Python 2 `<>` operator (identical semantics).
default_keymap = tuple(
    [(r'\C-a', 'beginning-of-line'),
     (r'\C-b', 'left'),
     (r'\C-c', 'interrupt'),
     (r'\C-d', 'delete'),
     (r'\C-e', 'end-of-line'),
     (r'\C-f', 'right'),
     (r'\C-g', 'cancel'),
     (r'\C-h', 'backspace'),
     (r'\C-j', 'accept'),
     (r'\<return>', 'accept'),
     (r'\C-k', 'kill-line'),
     (r'\C-l', 'clear-screen'),
     (r'\C-m', 'accept'),
     (r'\C-q', 'quoted-insert'),
     (r'\C-t', 'transpose-characters'),
     (r'\C-u', 'unix-line-discard'),
     (r'\C-v', 'quoted-insert'),
     (r'\C-w', 'unix-word-rubout'),
     (r'\C-x\C-u', 'upcase-region'),
     (r'\C-y', 'yank'),
     (r'\C-z', 'suspend'),
     (r'\M-b', 'backward-word'),
     (r'\M-c', 'capitalize-word'),
     (r'\M-d', 'kill-word'),
     (r'\M-f', 'forward-word'),
     (r'\M-l', 'downcase-word'),
     (r'\M-t', 'transpose-words'),
     (r'\M-u', 'upcase-word'),
     (r'\M-y', 'yank-pop'),
     (r'\M--', 'digit-arg'),
     (r'\M-0', 'digit-arg'),
     (r'\M-1', 'digit-arg'),
     (r'\M-2', 'digit-arg'),
     (r'\M-3', 'digit-arg'),
     (r'\M-4', 'digit-arg'),
     (r'\M-5', 'digit-arg'),
     (r'\M-6', 'digit-arg'),
     (r'\M-7', 'digit-arg'),
     (r'\M-8', 'digit-arg'),
     (r'\M-9', 'digit-arg'),
     #(r'\M-\n', 'insert-nl'),
     ('\\\\', 'self-insert')] + \
    [(c, 'self-insert')
     for c in map(chr, range(32, 127)) if c != '\\'] + \
    [(c, 'self-insert')
     for c in map(chr, range(128, 256)) if c.isalpha()] + \
    [(r'\<up>', 'up'),
     (r'\<down>', 'down'),
     (r'\<left>', 'left'),
     (r'\<right>', 'right'),
     (r'\<insert>', 'quoted-insert'),
     (r'\<delete>', 'delete'),
     (r'\<backspace>', 'backspace'),
     (r'\M-\<backspace>', 'backward-kill-word'),
     (r'\<end>', 'end-of-line'),         # was 'end'
     (r'\<home>', 'beginning-of-line'),  # was 'home'
     (r'\<f1>', 'help'),
     (r'\EOF', 'end'),  # the entries in the terminfo database for xterms
     (r'\EOH', 'home'), # seem to be wrong.  this is a less than ideal
                        # workaround
     ])

del c # from the listcomps
class Reader(object):
    """The Reader class implements the bare bones of a command reader,
    handling such details as editing and cursor motion.  What it does
    not support are such things as completion or history support -
    these are implemented elsewhere.

    Instance variables of note include:

      * buffer:
        A *list* (*not* a string at the moment :-) containing all the
        characters that have been entered.
      * console:
        Hopefully encapsulates the OS dependent stuff.
      * pos:
        A 0-based index into `buffer' for where the insertion point
        is.
      * screeninfo:
        Ahem.  This list contains some info needed to move the
        insertion point around reasonably efficiently.  I'd like to
        get rid of it, because its contents are obtuse (to put it
        mildly) but I haven't worked out if that is possible yet.
      * cxy, lxy:
        the position of the insertion point in screen ... XXX
      * syntax_table:
        Dictionary mapping characters to `syntax class'; read the
        emacs docs to see what this means :-)
      * commands:
        Dictionary mapping command names to command classes.
      * arg:
        The emacs-style prefix argument.  It will be None if no such
        argument has been provided.
      * dirty:
        True if we need to refresh the display.
      * kill_ring:
        The emacs-style kill-ring; manipulated with yank & yank-pop
      * ps1, ps2, ps3, ps4:
        prompts.  ps1 is the prompt for a one-line input; for a
        multiline input it looks like:
            ps2> first line of input goes here
            ps3> second and further
            ps3> lines get ps3
            ...
            ps4> and the last one gets ps4
        As with the usual top-level, you can set these to instances if
        you like; str() will be called on them (once) at the beginning
        of each command.  Don't put really long or newline containing
        strings here, please!
        This is just the default policy; you can change it freely by
        overriding get_prompt() (and indeed some standard subclasses
        do).
      * finished:
        handle1 will set this to a true value if a command signals
        that we're done.
    """

    # shown by the 'help' command; intentionally minimal
    help_text = """\
This is pyrepl.  Hear my roar.

Helpful text may appear here at some point in the future when I'm
feeling more loquacious than I am now."""

    # if true, status/error messages render below the input, else above it
    msg_at_bottom = True
    def __init__(self, console):
        """Initialise the reader on *console* (a pyrepl console object)."""
        self.buffer = []
        self.ps1 = "->> "
        self.ps2 = "/>> "
        self.ps3 = "|.. "
        self.ps4 = "\__ "
        self.kill_ring = []
        self.arg = None
        self.finished = 0
        self.console = console
        self.commands = {}
        self.msg = ''
        # Harvest every lower-cased Command subclass from the commands
        # module and register it under both its underscore and dash names.
        for v in vars(commands).values():
            if  ( isinstance(v, type)
                  and issubclass(v, commands.Command)
                  and v.__name__[0].islower() ):
                self.commands[v.__name__] = v
                self.commands[v.__name__.replace('_', '-')] = v
        self.syntax_table = make_default_syntax_table()
        self.input_trans_stack = []
        self.keymap = self.collect_keymap()
        self.input_trans = input.KeymapTranslator(
            self.keymap,
            invalid_cls='invalid-key',
            character_cls='self-insert')
    def collect_keymap(self):
        # Hook for subclasses to contribute extra bindings; the base
        # reader just uses the module-level default_keymap.
        return default_keymap
    def calc_screen(self):
        """The purpose of this method is to translate changes in
        self.buffer into changes in self.screen.  Currently it rips
        everything down and starts from scratch, which whilst not
        especially efficient is certainly simple(r).
        """
        lines = self.get_unicode().split("\n")
        screen = []
        screeninfo = []
        w = self.console.width - 1
        # p tracks the insertion point relative to the current line; it is
        # decremented by each line's length as we walk down the buffer.
        p = self.pos
        for ln, line in zip(range(len(lines)), lines):
            ll = len(line)
            if 0 <= p <= ll:
                # The cursor is on this logical line.
                if self.msg and not self.msg_at_bottom:
                    for mline in self.msg.split("\n"):
                        screen.append(mline)
                        screeninfo.append((0, []))
                self.lxy = p, ln
            prompt = self.get_prompt(ln, ll >= p >= 0)
            # Multi-line prompts: everything before the last newline gets
            # its own screen row with no editable content.
            while '\n' in prompt:
                pre_prompt, _, prompt = prompt.partition('\n')
                screen.append(pre_prompt)
                screeninfo.append((0, []))
            p -= ll + 1
            prompt, lp = self.process_prompt(prompt)
            l, l2 = disp_str(line)
            # How many times this line wraps past the console width
            # (Python 2 integer division).
            wrapcount = (len(l) + lp) / w
            if wrapcount == 0:
                screen.append(prompt + l)
                screeninfo.append((lp, l2+[1]))
            else:
                # First chunk carries the prompt; continuation chunks end
                # with a backslash to mark the wrap.
                screen.append(prompt + l[:w-lp] + "\\")
                screeninfo.append((lp, l2[:w-lp]))
                for i in range(-lp + w, -lp + wrapcount*w, w):
                    screen.append(l[i:i+w] + "\\")
                    screeninfo.append((0, l2[i:i + w]))
                screen.append(l[wrapcount*w - lp:])
                screeninfo.append((0, l2[wrapcount*w - lp:]+[1]))
        self.screeninfo = screeninfo
        self.cxy = self.pos2xy(self.pos)
        if self.msg and self.msg_at_bottom:
            for mline in self.msg.split("\n"):
                screen.append(mline)
                screeninfo.append((0, []))
        return screen
def process_prompt(self, prompt):
""" Process the prompt.
This means calculate the length of the prompt. The character \x01
and \x02 are used to bracket ANSI control sequences and need to be
excluded from the length calculation. So also a copy of the prompt
is returned with these control characters removed. """
out_prompt = ''
l = len(prompt)
pos = 0
while True:
s = prompt.find('\x01', pos)
if s == -1:
break
e = prompt.find('\x02', s)
if e == -1:
break
# Found start and end brackets, subtract from string length
l = l - (e-s+1)
out_prompt += prompt[pos:s] + prompt[s+1:e]
pos = e+1
out_prompt += prompt[pos:]
return out_prompt, l
def bow(self, p=None):
"""Return the 0-based index of the word break preceding p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD:
p -= 1
while p >= 0 and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p -= 1
return p + 1
def eow(self, p=None):
"""Return the 0-based index of the word break following p most
immediately.
p defaults to self.pos; word boundaries are determined using
self.syntax_table."""
if p is None:
p = self.pos
st = self.syntax_table
b = self.buffer
while p < len(b) and st.get(b[p], SYNTAX_WORD) <> SYNTAX_WORD:
p += 1
while p < len(b) and st.get(b[p], SYNTAX_WORD) == SYNTAX_WORD:
p += 1
return p
def bol(self, p=None):
"""Return the 0-based index of the line break preceding p most
immediately.
p defaults to self.pos."""
# XXX there are problems here.
if p is None:
p = self.pos
b = self.buffer
p -= 1
while p >= 0 and b[p] <> '\n':
p -= 1
return p + 1
def eol(self, p=None):
"""Return the 0-based index of the line break following p most
immediately.
p defaults to self.pos."""
if p is None:
p = self.pos
b = self.buffer
while p < len(b) and b[p] <> '\n':
p += 1
return p
def get_arg(self, default=1):
"""Return any prefix argument that the user has supplied,
returning `default' if there is None. `default' defaults
(groan) to 1."""
if self.arg is None:
return default
else:
return self.arg
    def get_prompt(self, lineno, cursor_on_line):
        """Return what should be in the left-hand margin for line
        `lineno'."""
        # While a prefix argument is being collected, show it on the
        # cursor's line instead of the usual prompt.
        if self.arg is not None and cursor_on_line:
            return "(arg: %s) "%self.arg
        # self.buffer is a list of characters, so this is a membership
        # test for a newline character (multi-line input).
        if "\n" in self.buffer:
            if lineno == 0:
                res = self.ps2
            elif lineno == self.buffer.count("\n"):
                res = self.ps4
            else:
                res = self.ps3
        else:
            res = self.ps1
        # Lazily call str() on self.psN, and cache the results using as key
        # the object on which str() was called. This ensures that even if the
        # same object is used e.g. for ps1 and ps2, str() is called only once.
        if res not in self._pscache:
            self._pscache[res] = str(res)
        return self._pscache[res]
    def push_input_trans(self, itrans):
        # Temporarily replace the keymap translator, saving the old one
        # so pop_input_trans can restore it.
        self.input_trans_stack.append(self.input_trans)
        self.input_trans = itrans
    def pop_input_trans(self):
        # Restore the translator saved by the matching push_input_trans.
        self.input_trans = self.input_trans_stack.pop()
    def pos2xy(self, pos):
        """Return the x, y coordinates of position 'pos'."""
        # this *is* incomprehensible, yes.
        y = 0
        assert 0 <= pos <= len(self.buffer)
        if pos == len(self.buffer):
            # End of buffer: last screen row, one past its final cell.
            y = len(self.screeninfo) - 1
            p, l2 = self.screeninfo[y]
            return p + len(l2) - 1, y
        else:
            # Find the screen row containing pos.  Each row's l2 list has
            # a 1 per buffer character it consumes (0 for continuation
            # cells), so count(1) is the characters on that row.
            for p, l2 in self.screeninfo:
                l = l2.count(1)
                if l > pos:
                    break
                else:
                    pos -= l
                    y += 1
            # Walk the row's cell list to the column of the pos-th char.
            c = 0
            i = 0
            while c < pos:
                c += l2[i]
                i += 1
            while l2[i] == 0:
                i += 1
            return p + i, y
    def insert(self, text):
        """Insert 'text' at the insertion point."""
        # Slice-assign so self.buffer keeps its identity (other methods
        # mutate it in place, e.g. prepare() does `del self.buffer[:]`).
        self.buffer[self.pos:self.pos] = list(text)
        self.pos += len(text)
        self.dirty = 1
    def update_cursor(self):
        """Move the cursor to reflect changes in self.pos"""
        self.cxy = self.pos2xy(self.pos)
        self.console.move_cursor(*self.cxy)
    def after_command(self, cmd):
        """This function is called to allow post command cleanup."""
        # Commands that don't collect digits clear any pending prefix
        # argument; the display must be refreshed to drop the "(arg: N)"
        # prompt.
        if getattr(cmd, "kills_digit_arg", 1):
            if self.arg is not None:
                self.dirty = 1
            self.arg = None
    def prepare(self):
        """Get ready to run.  Call restore when finished.  You must not
        write to the console in between the calls to prepare and
        restore."""
        try:
            self.console.prepare()
            # Reset all per-read state (pos, dirty, caches) here rather
            # than in __init__ so a Reader can be reused across reads.
            self.arg = None
            self.screeninfo = []
            self.finished = 0
            del self.buffer[:]
            self.pos = 0
            self.dirty = 1
            self.last_command = None
            self._pscache = {}
        except:
            # Put the console back in a sane state before propagating.
            self.restore()
            raise
def last_command_is(self, klass):
if not self.last_command:
return 0
return issubclass(klass, self.last_command)
    def restore(self):
        """Clean up after a run."""
        self.console.restore()
    def finish(self):
        """Called when a command signals that we're finished."""
        # Subclass hook; the base reader has nothing to do.
        pass
def error(self, msg="none"):
self.msg = "! " + msg + " "
self.dirty = 1
self.console.beep()
    def update_screen(self):
        # Redraw only if something changed since the last refresh.
        if self.dirty:
            self.refresh()
    def refresh(self):
        """Recalculate and refresh the screen."""
        # this call sets up self.cxy, so call it first.
        screen = self.calc_screen()
        self.console.refresh(screen, self.cxy)
        self.dirty = 0 # forgot this for a while (blush)
    def do_cmd(self, cmd):
        """Instantiate and execute one command.

        *cmd* is an (identifier, event-data) pair: the identifier is
        either a command name (looked up in self.commands) or a Command
        class itself.
        """
        #print cmd
        if isinstance(cmd[0], str):
            cmd = self.commands.get(cmd[0],
                                    commands.invalid_command)(self, cmd)
        elif isinstance(cmd[0], type):
            cmd = cmd[0](self, cmd)

        cmd.do()

        self.after_command(cmd)

        if self.dirty:
            self.refresh()
        else:
            self.update_cursor()

        # digit_arg must not clobber last_command, so e.g. repeated
        # yank-pop still sees the previous yank.
        if not isinstance(cmd, commands.digit_arg):
            self.last_command = cmd.__class__

        self.finished = cmd.finish
        if self.finished:
            self.console.finish()
            self.finish()
    def handle1(self, block=1):
        """Handle a single event.  Wait as long as it takes if block
        is true (the default), otherwise return None if no event is
        pending."""
        # Any displayed message is cleared by the next event.
        if self.msg:
            self.msg = ''
            self.dirty = 1

        while 1:
            event = self.console.get_event(block)
            if not event: # can only happen if we're not blocking
                return None

            translate = True

            if event.evt == 'key':
                self.input_trans.push(event)
            elif event.evt == 'scroll':
                self.refresh()
            elif event.evt == 'resize':
                self.refresh()
            else:
                # Unknown event types are passed through untranslated.
                translate = False

            if translate:
                cmd = self.input_trans.get()
            else:
                cmd = event.evt, event.data

            if cmd is None:
                # Partial key sequence: keep waiting (if blocking).
                if block:
                    continue
                else:
                    return None

            self.do_cmd(cmd)
            return 1
    def push_char(self, char):
        # Feed a character into the console's input queue and process it
        # without blocking (used for programmatic input).
        self.console.push_char(char)
        self.handle1(0)
    def readline(self, returns_unicode=False, startup_hook=None):
        """Read a line.  The implementation of this method also shows
        how to drive Reader if you want more control over the event
        loop."""
        self.prepare()
        try:
            if startup_hook is not None:
                startup_hook()
            self.refresh()
            # Pump events until some command sets self.finished.
            while not self.finished:
                self.handle1()
            if returns_unicode:
                return self.get_unicode()
            return self.get_buffer()
        finally:
            # Always restore the console, even on KeyboardInterrupt etc.
            self.restore()
def bind(self, spec, command):
self.keymap = self.keymap + ((spec, command),)
self.input_trans = input.KeymapTranslator(
self.keymap,
invalid_cls='invalid-key',
character_cls='self-insert')
def get_buffer(self, encoding=None):
if encoding is None:
encoding = self.console.encoding
return u''.join(self.buffer).encode(self.console.encoding)
    def get_unicode(self):
        """Return the current buffer as a unicode string."""
        return u''.join(self.buffer)
def test():
    """Interactive smoke test: drive a bare Reader on a real Unix console
    until it reads an empty line."""
    from pyrepl.unix_console import UnixConsole
    r = Reader(UnixConsole())
    r.ps1, r.ps2, r.ps3, r.ps4 = "**> ", "/*> ", "|*> ", "\*> "
    while r.readline():
        pass
if __name__ == '__main__':
    test()
| {
"content_hash": "d124c2c8c1095d43f9e8375028b1a562",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 78,
"avg_line_length": 32.121911037891266,
"alnum_prop": 0.5184121448353677,
"repo_name": "ArneBab/pypyjs",
"id": "677279bce29195c834a10aefc983fa306731f954",
"size": "20461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/pyrepl/reader.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Line Chart with Logarithmic Scale
---------------------------------
How to make a line chart on a `Logarithmic scale <https://en.wikipedia.org/wiki/Logarithmic_scale>`_.
"""
# category: line charts
import altair as alt
from vega_datasets import data
source = data.population()

# Line chart of total population per year, with a logarithmic y axis.
alt.Chart(source).mark_line().encode(
    x='year:O',
    y=alt.Y('sum(people)',
            scale=alt.Scale(type="log")),  # log scale applied here
)
"content_hash": "7b165f11262a948cec0debae3dc04864",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 101,
"avg_line_length": 24.72222222222222,
"alnum_prop": 0.6202247191011236,
"repo_name": "jakevdp/altair",
"id": "b45fcd92ebd0a07a46e87a4afac896a85ea235d8",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "altair/examples/line_with_log_scale.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "5353045"
},
{
"name": "TeX",
"bytes": "2684"
}
],
"symlink_target": ""
} |
__author__ = 'cosven'
import os
import json
import hashlib
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtNetwork import *
from base.network_manger import NetworkManager
from base.logger import LOG
from base.common import write_json_into_file
from constants import DATA_PATH
class LoginDialog(QDialog):
    """Login dialog.

    Pops up a dialog where the user enters a username and password; the
    login button invokes the login routine.  On successful login the
    ("login_success") signal is emitted; on failure a hint message is
    shown instead.

    (Translated from the original Chinese docstring.)
    """
    # Emitted with the account payload dict when login succeeds.
    signal_login_sucess = pyqtSignal([dict], name='login_success')
    # File (under DATA_PATH) caching the remembered username/password.
    pw_filename = "ne_un_pw.json"
    def __init__(self, parent):
        """Build the dialog's widgets; *parent* supplies the NetEase API
        object (parent.api) and the network queue."""
        super(LoginDialog, self).__init__(parent)
        self.username_label = QLabel()
        self.password_label = QLabel()
        self.hint_label = QLabel()
        self.username_widget = QLineEdit()
        self.password_widget = QLineEdit()
        self.login_btn = QPushButton()
        self.captcha_label = QLabel()
        self.captcha_lineedit = QLineEdit()

        self.layout = QVBoxLayout()
        self.is_remember_chb = QCheckBox(u'记住账号')

        self.nm = NetworkManager()
        self.ne = parent.api

        # True while the password field still holds the auto-filled
        # (already md5-hashed) saved password.
        self.is_autofill = False
        self.is_need_captcha = False
        self.captcha_id = 0
        self.user_data = 0

        self.__set_signal_binding()
        self.__set_widgets_prop()
        self.__set_layouts_prop()
        self.__set_me()
        self.fill_content()
    def __set_signal_binding(self):
        # Wire button/line-edit signals to their handlers.
        self.login_btn.clicked.connect(self.__login)
        self.password_widget.textChanged.connect(self.on_password_lineedit_changed)
def fill_content(self):
"""
判断之前是否保存了用户名和密码:
保存了就是直接加载
"""
if self.has_saved_userinfo():
try:
f = open(DATA_PATH + self.pw_filename, 'r')
login_data = json.load(f)
f.close()
if 'is_remember' in login_data.keys() and login_data['is_remember']:
self.username_widget.setText(str(login_data['username']))
self.password_widget.setText(str(login_data['password']))
self.is_remember_chb.setCheckState(2)
self.is_autofill = True
except Exception as e:
LOG.error(str(e))
def has_saved_userinfo(self):
"""判断之前是否有保存过的用户名与密码
:return:
"""
if os.path.exists(DATA_PATH + self.pw_filename):
return True
return False
def save_login_info(self, login_data):
if login_data['is_remember']:
try:
f = open(DATA_PATH + self.pw_filename, 'w')
if self.is_autofill is not True: # 如果不是自动填充,说明密码时已经没有加密过
password = login_data['password'].encode('utf-8')
login_data['password'] = hashlib.md5(password).hexdigest()
data_json = json.dumps(login_data)
write_json_into_file(data_json, f)
except Exception as e:
LOG.error(str(e))
else:
try:
os.remove(DATA_PATH + self.pw_filename)
except Exception as e:
LOG.warning(str(e))
def __login(self):
"""登录
在用户登录成功时,会发射("login_success")信号
"""
data = {}
if self.is_need_captcha is True:
captcha_text = str(self.captcha_lineedit.text())
flag, self.captcha_id = self.ne.confirm_captcha(self.captcha_id, captcha_text)
if flag is not True:
self.hint_label.setText(u'验证码错误')
url = self.ne.get_captcha_url(data['captchaId'])
request = QNetworkRequest(QUrl(url))
self.nm.get(request)
self.parent().network_queue.put(self.show_captcha)
return
phone_login = False # 0: 网易通行证, 1: 手机号登陆
username = str(self.username_widget.text()) # 包含中文会出错
password = str(self.password_widget.text())
# 2: checked, 1: partial checked
is_remember = self.is_remember_chb.checkState()
# judget if logining by using phone number
if username.isdigit() and len(username) == 11:
username = int(username)
phone_login = True
LOG.info(u"正在使用手机号进行登录")
login_data = {
'username': username,
'password': password,
'is_remember': is_remember
}
if not self.is_autofill:
data = self.ne.login(username, password, phone_login)
else:
data = self.ne.auto_login(username, password, phone_login)
if data['code'] == 200:
self.hint_label.setText(u'登录成功')
self.signal_login_sucess.emit(data)
self.close()
self.save_login_info(login_data)
elif data['code'] == 415: # 需要验证码
self.is_need_captcha = True
self.hint_label.setText(data['message'])
LOG.info(u'本次登陆需要验证码')
self.captcha_id = data['captchaId']
self.captcha_label.show()
self.captcha_lineedit.show()
url = self.ne.get_captcha_url(data['captchaId'])
request = QNetworkRequest(QUrl(url))
self.nm.get(request)
self.parent().network_queue.put(self.show_captcha)
elif data['code'] == 408:
self.hint_label.setText(u'网络连接超时')
elif data['code'] == 501:
self.hint_label.setText(u'用户名错误')
elif data['code'] == 502:
self.hint_label.setText(u'密码错误')
elif data['code'] == 509:
self.hint_label.setText(u'你可能正在使用手机号登陆,密码错误几次之后,你可能需要等待几分钟再登录')
else:
self.hint_label.setText(u'未知错误')
    def show_captcha(self, res):
        """Render the downloaded captcha image; *res* is a network reply
        whose payload is the image bytes."""
        img = QImage()
        img.loadFromData(res.readAll())
        self.captcha_label.setPixmap(QPixmap(img))
    @pyqtSlot()
    def on_password_lineedit_changed(self):
        # Any manual edit means the field no longer holds the saved
        # (already-hashed) password.
        self.is_autofill = False
    def __set_me(self):
        # Configure the dialog itself (object name for styling, layout).
        self.setObjectName('login_dialog')
        self.setLayout(self.layout)
    def __set_widgets_prop(self):
        """Configure widget texts, placeholders and initial visibility.
        (User-visible strings are Chinese; translations in comments.)"""
        self.login_btn.setText(u'登录')                      # "Log in"
        self.username_label.setText(u'网易邮箱或者手机号')    # "NetEase email or phone number"
        self.password_label.setText(u'密码')                 # "Password"
        self.username_widget.setPlaceholderText(u'请输入用户名')   # "Enter username"
        self.password_widget.setPlaceholderText(u'请输入密码')     # "Enter password"

        self.username_widget.setAttribute(Qt.WA_MacShowFocusRect, False)
        self.password_widget.setAttribute(Qt.WA_MacShowFocusRect, False)
        self.setAttribute(Qt.WA_MacShowFocusRect, False)

        # Captcha widgets stay hidden until the server demands one.
        self.captcha_label.hide()
        self.captcha_lineedit.hide()

        self.password_widget.setEchoMode(QLineEdit.Password)
    def __set_layouts_prop(self):
        # Stack all widgets vertically, in form order, with the stretch
        # keeping them packed at the top.
        self.layout.addWidget(self.username_label)
        self.layout.addWidget(self.username_widget)
        self.layout.addWidget(self.password_label)
        self.layout.addWidget(self.password_widget)
        self.layout.addWidget(self.hint_label)
        self.layout.addWidget(self.captcha_label)
        self.layout.addWidget(self.captcha_lineedit)
        self.layout.addWidget(self.is_remember_chb)
        self.layout.addWidget(self.login_btn)
        self.layout.addStretch(1)
| {
"content_hash": "0313a8231ae30badd557afc236311097",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 90,
"avg_line_length": 33.995327102803735,
"alnum_prop": 0.580893470790378,
"repo_name": "baifenbenniao/FeelUOwn",
"id": "16603b166b2d275ec7ca16bb0955f4eff2159e5d",
"size": "7867",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/widgets/login_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "CSS",
"bytes": "3719"
},
{
"name": "HTML",
"bytes": "14979"
},
{
"name": "JavaScript",
"bytes": "4940"
},
{
"name": "Makefile",
"bytes": "6795"
},
{
"name": "Python",
"bytes": "146659"
},
{
"name": "Shell",
"bytes": "6857"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the ``erp_test`` app.

    Auto-generated by Django: creates the datacenter hierarchy (Node ->
    Floor -> Room -> Row -> Rack -> Unit), the server/component/template
    models and the property (EAV-style) tables, then wires up the
    remaining foreign keys, many-to-many fields and unique constraints.
    Do not hand-edit the operations.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Basket',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Component',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('manufacturer', models.CharField(max_length=200)),
                ('model_name', models.CharField(max_length=200)),
                ('serial_number', models.CharField(max_length=200)),
                ('stock_amount', models.PositiveSmallIntegerField(default=0)),
            ],
            options={
                'verbose_name': 'Server Component',
                'verbose_name_plural': 'Servers Components',
            },
        ),
        migrations.CreateModel(
            name='ComponentAndTemplatePropertyValue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=100, verbose_name='Value', blank=True)),
                ('value_as_float', models.FloatField(null=True, verbose_name='Value as float', blank=True)),
                ('component', models.ForeignKey(related_name='property_values', verbose_name='Component', blank=True, to='erp_test.Component', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ComponentsPropertiesRelation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('position', models.IntegerField(default=999, verbose_name='Position')),
                ('component', models.ForeignKey(related_name='componentsproperties', verbose_name='Components', to='erp_test.Component')),
            ],
            options={
                'ordering': ('position',),
            },
        ),
        migrations.CreateModel(
            name='Floor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
            options={
                'verbose_name': 'Floor',
                'verbose_name_plural': 'Floors',
            },
        ),
        migrations.CreateModel(
            name='GroupsPropertiesRelation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('position', models.IntegerField(default=999, verbose_name='Position')),
            ],
            options={
                'ordering': ('position',),
            },
        ),
        migrations.CreateModel(
            name='Node',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('address', models.TextField(blank=True)),
            ],
            options={
                'verbose_name': 'Node',
                'verbose_name_plural': 'Nodes',
            },
        ),
        migrations.CreateModel(
            name='Property',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('title', models.CharField(max_length=100, verbose_name='Title')),
                ('position', models.IntegerField(null=True, verbose_name='Position', blank=True)),
                ('unit', models.CharField(max_length=15, verbose_name='Unit', blank=True)),
                ('type', models.PositiveSmallIntegerField(default=2, verbose_name='Type', choices=[(1, 'Float field'), (2, 'Text field'), (3, 'Select field')])),
                ('required', models.BooleanField(default=False, verbose_name='Required')),
                ('components', models.ManyToManyField(related_name='properties', verbose_name='Components', to='erp_test.Component', through='erp_test.ComponentsPropertiesRelation', blank=True)),
            ],
            options={
                'ordering': ['position'],
                'verbose_name_plural': 'Properties',
            },
        ),
        migrations.CreateModel(
            name='PropertyGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='Name', blank=True)),
                ('position', models.IntegerField(default=1000, verbose_name='Position')),
                ('components', models.ManyToManyField(related_name='property_groups', verbose_name='Servers Components', to='erp_test.Component', blank=True)),
            ],
            options={
                'ordering': ('position',),
            },
        ),
        migrations.CreateModel(
            name='PropertyOption',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('position', models.IntegerField(default=99, verbose_name='Position')),
                ('property', models.ForeignKey(related_name='options', verbose_name='Property', to='erp_test.Property')),
            ],
            options={
                'ordering': ['position'],
            },
        ),
        migrations.CreateModel(
            name='Rack',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
            options={
                'verbose_name': 'Rack',
                'verbose_name_plural': 'Racks',
            },
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('floor', models.ForeignKey(to='erp_test.Floor')),
            ],
            options={
                'verbose_name': 'Room',
                'verbose_name_plural': 'Rooms',
            },
        ),
        migrations.CreateModel(
            name='Row',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('room', models.ForeignKey(to='erp_test.Room')),
            ],
            options={
                'verbose_name': 'Row',
                'verbose_name_plural': 'Rows',
            },
        ),
        migrations.CreateModel(
            name='Server',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServerComponents',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantity', models.PositiveSmallIntegerField(default=0)),
                ('component', models.ForeignKey(to='erp_test.Component')),
                ('server', models.ForeignKey(to='erp_test.Server')),
            ],
        ),
        migrations.CreateModel(
            name='ServerTemplate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TemplatesPropertiesRelation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('position', models.IntegerField(default=999, verbose_name='Position')),
                ('property', models.ForeignKey(verbose_name='Property', to='erp_test.Property')),
                ('template', models.ForeignKey(related_name='templatesproperties', verbose_name='Servers Templates', to='erp_test.ServerTemplate')),
            ],
            options={
                'ordering': ('position',),
            },
        ),
        migrations.CreateModel(
            name='Unit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('position', models.PositiveSmallIntegerField(default=1)),
                ('unit_takes', models.PositiveSmallIntegerField(default=1, verbose_name='height in units')),
                ('basket', models.ForeignKey(blank=True, to='erp_test.Basket', null=True)),
                ('rack', models.ForeignKey(to='erp_test.Rack')),
                ('server', models.ForeignKey(blank=True, to='erp_test.Server', null=True)),
            ],
            options={
                'verbose_name': 'Unit',
                'verbose_name_plural': 'Units',
            },
        ),
        # Deferred field additions (break circular references above).
        migrations.AddField(
            model_name='server',
            name='components',
            field=models.ManyToManyField(to='erp_test.Component', through='erp_test.ServerComponents'),
        ),
        migrations.AddField(
            model_name='server',
            name='template',
            field=models.ForeignKey(to='erp_test.ServerTemplate'),
        ),
        migrations.AddField(
            model_name='rack',
            name='row',
            field=models.ForeignKey(to='erp_test.Row'),
        ),
        migrations.AddField(
            model_name='propertygroup',
            name='templates',
            field=models.ManyToManyField(related_name='property_groups', verbose_name='Servers Templates', to='erp_test.ServerTemplate', blank=True),
        ),
        migrations.AddField(
            model_name='property',
            name='groups',
            field=models.ManyToManyField(related_name='properties', verbose_name='Group', to='erp_test.PropertyGroup', through='erp_test.GroupsPropertiesRelation', blank=True),
        ),
        migrations.AddField(
            model_name='property',
            name='templates',
            field=models.ManyToManyField(related_name='properties', verbose_name='Servers Templates', to='erp_test.ServerTemplate', through='erp_test.TemplatesPropertiesRelation', blank=True),
        ),
        migrations.AddField(
            model_name='groupspropertiesrelation',
            name='group',
            field=models.ForeignKey(related_name='groupproperties', verbose_name='Group', to='erp_test.PropertyGroup'),
        ),
        migrations.AddField(
            model_name='groupspropertiesrelation',
            name='property',
            field=models.ForeignKey(verbose_name='Property', to='erp_test.Property'),
        ),
        migrations.AddField(
            model_name='floor',
            name='node',
            field=models.ForeignKey(to='erp_test.Node'),
        ),
        migrations.AddField(
            model_name='componentspropertiesrelation',
            name='property',
            field=models.ForeignKey(verbose_name='Property', to='erp_test.Property'),
        ),
        migrations.AddField(
            model_name='componentandtemplatepropertyvalue',
            name='property',
            field=models.ForeignKey(related_name='property_values', verbose_name='Property', to='erp_test.Property'),
        ),
        migrations.AddField(
            model_name='componentandtemplatepropertyvalue',
            name='property_group',
            field=models.ForeignKey(related_name='property_values', verbose_name='Property group', blank=True, to='erp_test.PropertyGroup', null=True),
        ),
        migrations.AddField(
            model_name='componentandtemplatepropertyvalue',
            name='template',
            field=models.ForeignKey(related_name='property_values', verbose_name='Servers Templates', blank=True, to='erp_test.ServerTemplate', null=True),
        ),
        migrations.AddField(
            model_name='basket',
            name='servers',
            field=models.ManyToManyField(to='erp_test.Server'),
        ),
        # Composite uniqueness constraints for the through tables.
        migrations.AlterUniqueTogether(
            name='templatespropertiesrelation',
            unique_together=set([('template', 'property')]),
        ),
        migrations.AlterUniqueTogether(
            name='servercomponents',
            unique_together=set([('server', 'component')]),
        ),
        migrations.AlterUniqueTogether(
            name='groupspropertiesrelation',
            unique_together=set([('group', 'property')]),
        ),
        migrations.AlterUniqueTogether(
            name='componentspropertiesrelation',
            unique_together=set([('component', 'property')]),
        ),
        migrations.AlterUniqueTogether(
            name='componentandtemplatepropertyvalue',
            unique_together=set([('component', 'template', 'property', 'property_group', 'value')]),
        ),
    ]
| {
"content_hash": "f821bb8923dc3e655994bb038bc001f9",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 195,
"avg_line_length": 45.532051282051285,
"alnum_prop": 0.5466000281571167,
"repo_name": "baffolobill/mb_test_1",
"id": "4230ece68b21c5bdf31ba98eb1add6a38e9f4175",
"size": "14230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mbtest1/erp_test/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "228768"
},
{
"name": "HTML",
"bytes": "54315"
},
{
"name": "JavaScript",
"bytes": "3483943"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "305003"
},
{
"name": "Shell",
"bytes": "187"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers import Dense
import numpy
def handler(event, context):
    """AWS Lambda entry point: train a small Keras MLP on the Pima Indians
    diabetes dataset and return the rounded (0/1) predictions.

    NOTE(review): this uses the Keras 1.x API (``init=``, ``nb_epoch=``);
    presumably the bundled lambda package pins that version -- confirm
    before upgrading Keras.

    :param event: Lambda event payload (unused).
    :param context: Lambda runtime context (unused).
    :return: list of 0.0/1.0 predictions for every training row.
    """
    # Fix the random seed so weight initialization/shuffling is reproducible.
    seed = 7
    numpy.random.seed(seed)
    # Load the dataset; the CSV must be packaged alongside this handler.
    dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
    # Split into input features (X, first 8 columns) and binary label (Y).
    X = dataset[:,0:8]
    Y = dataset[:,8]
    # Build an 8 -> 12 -> 8 -> 1 fully-connected network.
    model = Sequential()
    model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
    model.add(Dense(8, init='uniform', activation='relu'))
    model.add(Dense(1, init='uniform', activation='sigmoid'))
    # Binary classification setup.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Train; verbose=2 emits one log line per epoch to CloudWatch.
    model.fit(X, Y, nb_epoch=150, batch_size=10, verbose=2)
    # Predict probabilities on the training data itself (demo only).
    predictions = model.predict(X)
    # Threshold the sigmoid outputs to 0/1 class labels.
    rounded = [round(x[0]) for x in predictions]
    print(rounded)
return rounded | {
"content_hash": "54777d16a0e5cf635eacb94f46f9c863",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 34.206896551724135,
"alnum_prop": 0.6713709677419355,
"repo_name": "ryfeus/lambda-packs",
"id": "aa4644f4a803ae2176dfbed37f36f59d94eb51af",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Keras_tensorflow/source/service.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
"""
Pygments LaTeX formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import unittest
import tempfile
from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer
import support
# Presumably resolves the sample source file used as formatter input and the
# directory containing it -- see tests/support for the exact contract.
TESTFILE, TESTDIR = support.location(__file__)
class LatexFormatterTest(unittest.TestCase):
    """Smoke test for pygments.formatters.LatexFormatter."""

    def test_valid_output(self):
        """Format a sample Python file and, when ``latex`` is installed,
        check that the generated standalone document compiles cleanly.
        """
        # Read the sample source; close the handle explicitly instead of
        # leaking it until garbage collection.
        fp = open(TESTFILE)
        try:
            tokensource = list(PythonLexer().get_tokens(fp.read()))
        finally:
            fp.close()
        fmt = LatexFormatter(full=True, encoding='latin1')

        handle, pathname = tempfile.mkstemp('.tex')
        # place all output files in /tmp too
        old_wd = os.getcwd()
        os.chdir(os.path.dirname(pathname))
        try:
            tfile = os.fdopen(handle, 'wb')
            try:
                fmt.format(tokensource, tfile)
            finally:
                tfile.close()

            try:
                import subprocess
                ret = subprocess.Popen(['latex', '-interaction=nonstopmode',
                                        pathname],
                                       stdout=subprocess.PIPE).wait()
            except OSError:
                # latex not available
                pass
            else:
                self.failIf(ret, 'latex run reported errors')
        finally:
            # Remove the temp file and restore the working directory even
            # when the formatting or the latex check fails.
            os.unlink(pathname)
            os.chdir(old_wd)
| {
"content_hash": "08b96551ad470ad11eb8814b62517950",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 28.41304347826087,
"alnum_prop": 0.5921958684009181,
"repo_name": "ceci/pygments-hack",
"id": "dc8231bebee476ad0c0f9d9294c8a98701576f72",
"size": "1331",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_latex_formatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1627299"
}
],
"symlink_target": ""
} |
"""The tests for the WSDOT platform."""
from datetime import datetime, timedelta, timezone
import re
import homeassistant.components.wsdot.sensor as wsdot
from homeassistant.components.wsdot.sensor import (
ATTR_DESCRIPTION,
ATTR_TIME_UPDATED,
CONF_API_KEY,
CONF_ID,
CONF_NAME,
CONF_TRAVEL_TIMES,
RESOURCE,
SCAN_INTERVAL,
)
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
# Shared test configuration: one travel-time sensor for route id 96.
# NOTE(review): SCAN_INTERVAL (an imported constant) is used directly as a
# dict key here -- looks intentional for this platform's config schema, but
# confirm against the wsdot sensor component.
config = {
    CONF_API_KEY: "foo",
    SCAN_INTERVAL: timedelta(seconds=120),
    CONF_TRAVEL_TIMES: [{CONF_ID: 96, CONF_NAME: "I90 EB"}],
}
async def test_setup_with_config(hass):
    """Verify the sensor platform can be set up from configuration."""
    setup_ok = await async_setup_component(hass, "sensor", {"wsdot": config})
    assert setup_ok
async def test_setup(hass, requests_mock):
    """Set up the platform against a mocked API and check sensor attributes."""
    added = []

    def mock_add_entities(new_entities, update_before_add=False):
        """Collect added entities, updating them first when requested."""
        if update_before_add:
            for entity in new_entities:
                entity.update()
        for entity in new_entities:
            added.append(entity)

    # Any request to the WSDOT endpoint returns the canned fixture payload.
    matcher = re.compile(RESOURCE + "*")
    requests_mock.get(matcher, text=load_fixture("wsdot.json"))
    wsdot.setup_platform(hass, config, mock_add_entities)

    assert len(added) == 1
    sensor = added[0]
    assert sensor.name == "I90 EB"
    assert sensor.state == 11

    attributes = sensor.extra_state_attributes
    assert attributes[ATTR_DESCRIPTION] == (
        "Downtown Seattle to Downtown Bellevue via I-90"
    )
    expected_updated = datetime(
        2017, 1, 21, 15, 10, tzinfo=timezone(timedelta(hours=-8))
    )
    assert attributes[ATTR_TIME_UPDATED] == expected_updated
| {
"content_hash": "5770abc465121cd21b7cf0ded9bf3e6b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 29.56896551724138,
"alnum_prop": 0.6647230320699709,
"repo_name": "adrienbrault/home-assistant",
"id": "bbb56efdedadc2c953eba625d3e13958496a9e8a",
"size": "1715",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/wsdot/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import sys
import unittest
from webkitpy.tool import mocktool
import chromium_gpu
import chromium_linux
import chromium_mac
import chromium_win
import dryrun
import factory
import google_chrome
import gtk
import mac
import qt
import test
import win
class FactoryTest(unittest.TestCase):
    """Test factory creates proper port object for the target.

    Target is specified by port_name, sys.platform and options.
    """
    # FIXME: The ports themselves should expose what options they require,
    # instead of passing generic "options".

    def setUp(self):
        # Remember the real platform so tearDown can always restore it,
        # even when a test that patches sys.platform fails midway.
        self.real_sys_platform = sys.platform
        self.webkit_options = mocktool.MockOptions(pixel_tests=False)
        self.chromium_options = mocktool.MockOptions(pixel_tests=False,
                                                     chromium=True)

    def tearDown(self):
        # Safety net: undo any sys.platform patching a test left behind.
        sys.platform = self.real_sys_platform

    def assert_port(self, port_name, expected_port, port_obj=None):
        """Helper assert for port_name.

        Args:
            port_name: port name to get port object.
            expected_port: class of expected port object.
            port_obj: optional port object
        """
        port_obj = port_obj or factory.get(port_name=port_name)
        self.assertTrue(isinstance(port_obj, expected_port))

    def assert_platform_port(self, platform, options, expected_port):
        """Helper assert for platform and options.

        Args:
            platform: sys.platform.
            options: options to get port object.
            expected_port: class of expected port object.
        """
        orig_platform = sys.platform
        sys.platform = platform
        try:
            self.assertTrue(isinstance(factory.get(options=options),
                                       expected_port))
        finally:
            # Restore sys.platform even when the assertion fails so a
            # single failure cannot cascade into later tests.
            sys.platform = orig_platform

    def test_test(self):
        self.assert_port("test", test.TestPort)

    def test_dryrun(self):
        self.assert_port("dryrun-test", dryrun.DryRunPort)
        self.assert_port("dryrun-mac", dryrun.DryRunPort)

    def test_mac(self):
        self.assert_port("mac", mac.MacPort)
        self.assert_platform_port("darwin", None, mac.MacPort)
        self.assert_platform_port("darwin", self.webkit_options, mac.MacPort)

    def test_win(self):
        self.assert_port("win", win.WinPort)
        self.assert_platform_port("win32", None, win.WinPort)
        self.assert_platform_port("win32", self.webkit_options, win.WinPort)
        self.assert_platform_port("cygwin", None, win.WinPort)
        self.assert_platform_port("cygwin", self.webkit_options, win.WinPort)

    def test_google_chrome(self):
        # The actual Chrome class names aren't available so we test that the
        # objects we get are at least subclasses of the Chromium versions.
        self.assert_port("google-chrome-linux32",
                         chromium_linux.ChromiumLinuxPort)
        self.assert_port("google-chrome-linux64",
                         chromium_linux.ChromiumLinuxPort)
        self.assert_port("google-chrome-win",
                         chromium_win.ChromiumWinPort)
        self.assert_port("google-chrome-mac",
                         chromium_mac.ChromiumMacPort)

    def test_gtk(self):
        self.assert_port("gtk", gtk.GtkPort)

    def test_qt(self):
        self.assert_port("qt", qt.QtPort)

    def test_chromium_gpu_linux(self):
        self.assert_port("chromium-gpu-linux", chromium_gpu.ChromiumGpuLinuxPort)

    def test_chromium_gpu_mac(self):
        self.assert_port("chromium-gpu-mac", chromium_gpu.ChromiumGpuMacPort)

    def test_chromium_gpu_win(self):
        self.assert_port("chromium-gpu-win", chromium_gpu.ChromiumGpuWinPort)

    def test_chromium_mac(self):
        self.assert_port("chromium-mac", chromium_mac.ChromiumMacPort)
        self.assert_platform_port("darwin", self.chromium_options,
                                  chromium_mac.ChromiumMacPort)

    def test_chromium_linux(self):
        self.assert_port("chromium-linux", chromium_linux.ChromiumLinuxPort)
        self.assert_platform_port("linux2", self.chromium_options,
                                  chromium_linux.ChromiumLinuxPort)

    def test_chromium_win(self):
        self.assert_port("chromium-win", chromium_win.ChromiumWinPort)
        self.assert_platform_port("win32", self.chromium_options,
                                  chromium_win.ChromiumWinPort)
        self.assert_platform_port("cygwin", self.chromium_options,
                                  chromium_win.ChromiumWinPort)

    def test_unknown_specified(self):
        # Test what happens when you specify an unknown port.
        self.assertRaises(NotImplementedError, factory.get,
                          port_name='unknown')

    def test_unknown_default(self):
        # Test what happens when you're running on an unknown platform.
        orig_platform = sys.platform
        sys.platform = 'unknown'
        try:
            self.assertRaises(NotImplementedError, factory.get)
        finally:
            sys.platform = orig_platform
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ef997b63d8e89b5b73852557011d1908",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 81,
"avg_line_length": 35.985915492957744,
"alnum_prop": 0.639334637964775,
"repo_name": "Xperia-Nicki/android_platform_sony_nicki",
"id": "e4a2cd43f885e5e0935804d9ad7515db1b2606a9",
"size": "6637",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "external/webkit/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "212775"
},
{
"name": "Awk",
"bytes": "19252"
},
{
"name": "C",
"bytes": "68667466"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "54670920"
},
{
"name": "CLIPS",
"bytes": "12224"
},
{
"name": "CSS",
"bytes": "283405"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Java",
"bytes": "4882"
},
{
"name": "JavaScript",
"bytes": "19597804"
},
{
"name": "Objective-C",
"bytes": "5849156"
},
{
"name": "PHP",
"bytes": "17224"
},
{
"name": "Pascal",
"bytes": "42411"
},
{
"name": "Perl",
"bytes": "1632149"
},
{
"name": "Prolog",
"bytes": "214621"
},
{
"name": "Python",
"bytes": "3493321"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Ruby",
"bytes": "78743"
},
{
"name": "Scilab",
"bytes": "554"
},
{
"name": "Shell",
"bytes": "265637"
},
{
"name": "TypeScript",
"bytes": "45459"
},
{
"name": "XSLT",
"bytes": "11219"
}
],
"symlink_target": ""
} |
import os
import ctypes
import time
import threading
import cv2
import numpy as np
from ikalog.utils import *
from ikalog.inputs import VideoInput
from ikalog.inputs.filters import WarpFilter, WarpCalibrationNotFound, WarpCalibrationUnacceptableSize
# Bind gettext for the 'screencapture' catalog as "_"; fallback=True degrades
# to identity translation when the catalog is missing.
_ = Localization.gettext_translation('screencapture', fallback=True).gettext
class ScreenCapture(VideoInput):
    """Video input that grabs the desktop and crops out the WiiU game screen.

    The game screen is located on the desktop by a perspective-warp
    calibration (``WarpFilter``); once calibrated, every captured desktop
    frame is warped/cropped to the expected game resolution.
    """

    cap_optimal_input_resolution = False

    # FIXME: Filter classes refer these variables.
    out_width = 1280
    out_height = 720

    def on_validate_warp(self, points):
        """Validate the four corner points proposed by warp calibration.

        Called by the calibration process with the detected corners
        [left_top, right_top, right_bottom, left_bottom] in the raw desktop
        image.  Returning False declines the candidate and cancels the
        calibration.

        Args:
            points: corner points of the candidate game-screen quad.

        Returns:
            True to approve the points, False to decline them.
        """
        w = int(points[1][0] - points[0][0])
        h = int(points[2][1] - points[1][1])
        # Accept 720p/1080p with a +/-2 pixel tolerance; warn when only
        # approximately matching, reject everything else.
        acceptable_geoms = [[720, 1280], [1080, 1920]]
        acceptable = False
        exact = False
        for geom in acceptable_geoms:
            if (geom[0] - 3 < h) and (h < geom[0] + 3):
                if (geom[1] - 3 < w) and (w < geom[1] + 3):
                    acceptable = True
            if geom[0] == h and geom[1] == w:
                exact = True
        if exact:
            pass
        elif acceptable:
            # NOTE: the 'resultion' typo below is the gettext msgid; fixing
            # it here would break existing translation catalogs.
            msg = '\n'.join([
                _('Due to the input resultion (%d x %d) some recognition may fail unexpectedly.') % (
                    w, h),
                _('IkaLog expects 1280 x 720, or 1920 x 1080 as input resolution.'),
            ])
            IkaUtils.dprint(msg)
        else:
            return False
        self.last_capture_geom = (h, w)
        return True

    def reset(self):
        """Reset capture state and discard any existing warp calibration."""
        self._warp_filter = WarpFilter(self)
        self._calibration_requested = False
        super(ScreenCapture, self).reset()

    def auto_calibrate(self, img):
        """Try to locate the game screen in ``img`` and calibrate the warp.

        Args:
            img: BGR desktop image to search.

        Returns:
            True if calibration succeeded and the warp filter was enabled.
        """
        try:
            r = self._warp_filter.calibrateWarp(
                img,
                validation_func=self.on_validate_warp
            )
        except WarpCalibrationUnacceptableSize as e:
            (w, h) = e.shape
            msg = '\n'.join([
                _('Current image size (%d x %d) cannot be accepted.') % (w, h),
                _('IkaLog expects 1280 x 720, or 1920 x 1080 as input resolution.'),
                _('Calibration Failed!'),
            ])
            IkaUtils.dprint(msg)
            return False
        except WarpCalibrationNotFound:
            msg = '\n'.join([
                _('Could not find any WiiU image from the desktop.'),
                _('IkaLog expects 1280 x 720, or 1920 x 1080 as input resolution.'),
                _('Calibration Failed!'),
            ])
            IkaUtils.dprint(msg)
            return False
        if not r:
            msg = '\n'.join([
                _('No description provided. (could be a bug)'),
                _('Calibration Failed!'),
            ])
            # Log before returning; the previous code placed this dprint
            # after the return, so the failure was never reported.
            IkaUtils.dprint(msg)
            return False
        self._warp_filter.enable()
        IkaUtils.dprint(_('Calibration succeeded!'))
        return True

    def capture_screen(self):
        """Grab the whole desktop; return it as a BGR image or None."""
        from PIL import ImageGrab
        try:
            img = ImageGrab.grab(None)
        except TypeError:
            # Not sure why this can happen occasionally.
            IkaUtils.dprint('%s: Failed to grab desktop image' % self)
            return None
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

    # override
    def _read_frame_func(self):
        """Capture one desktop frame, (re)calibrate if requested, and warp."""
        img = self.capture_screen()
        if self._calibration_requested:
            self._calibration_requested = False
            self.auto_calibrate(img)
        img = self._warp_filter.execute(img)
        return img

    def on_key_press(self, context, key):
        """Handle a key press; 'c'/'C' schedules calibration for next frame."""
        if (key == ord('c') or key == ord('C')):
            # Perform calibration on the next captured frame.
            self._calibration_requested = True

    # override
    def _is_active_func(self):
        # Desktop capture is always available.
        return True

    # override
    def _initialize_driver_func(self):
        # No driver to initialize for desktop capture.
        pass

    # override
    def _cleanup_driver_func(self):
        # No driver resources to release.
        pass

    # override
    def _select_device_by_index_func(self, source):
        IkaUtils.dprint(
            '%s: Does not support _select_device_by_index_func()' % self)

    # override
    def _select_device_by_name_func(self, source):
        IkaUtils.dprint(
            '%s: Does not support _select_device_by_name_func()' % self)
if __name__ == "__main__":
    # Manual test driver: show captured frames until ESC (27) is pressed.
    capture = ScreenCapture()
    key = 0
    while key != 27:
        frame = capture.read_frame()
        if frame is not None:
            cv2.imshow(capture.__class__.__name__, frame)
        key = cv2.waitKey(1)
        capture.on_key_press(None, key)
| {
"content_hash": "eb04f53505dea4e4304b4d8bc75fb25a",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 102,
"avg_line_length": 28.88757396449704,
"alnum_prop": 0.5514133551823024,
"repo_name": "hasegaw/IkaLog",
"id": "e674ad369781401759ed1fac5e07b68d8946500c",
"size": "5654",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ikalog/inputs/win/screencapture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37511"
},
{
"name": "Makefile",
"bytes": "1057"
},
{
"name": "Python",
"bytes": "749095"
},
{
"name": "Shell",
"bytes": "3312"
}
],
"symlink_target": ""
} |
"""Parser for Windows XML EventLog (EVTX) files."""
import pyevtx
from dfdatetime import filetime as dfdatetime_filetime
from plaso.containers import events
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
class WinEvtxRecordEventData(events.EventData):
  """Windows XML EventLog (EVTX) record event data.

  Attributes:
    creation_time (dfdatetime.DateTimeValues): event record creation date
        and time.
    computer_name (str): computer name stored in the event record.
    event_identifier (int): event identifier.
    event_level (int): event level.
    event_version (int): event version.
    message_identifier (int): event message identifier.
    offset (int): offset of the EVTX record relative to the start of the file,
        from which the event data was extracted.
    provider_identifier (str): identifier of the EventLog provider.
    record_number (int): event record number.
    recovered (bool): True if the record was recovered.
    source_name (str): name of the event source.
    strings (list[str]): event strings.
    user_sid (str): user security identifier (SID) stored in the event record.
    written_time (dfdatetime.DateTimeValues): event record written date and
        time.
    xml_string (str): XML representation of the event.
  """

  DATA_TYPE = 'windows:evtx:record'

  def __init__(self):
    """Initializes event data."""
    super(WinEvtxRecordEventData, self).__init__(data_type=self.DATA_TYPE)
    # All attributes start unset; the parser fills them in per record.
    for attribute_name in (
        'creation_time', 'computer_name', 'event_identifier', 'event_level',
        'event_version', 'message_identifier', 'offset',
        'provider_identifier', 'record_number', 'recovered', 'source_name',
        'strings', 'user_sid', 'written_time', 'xml_string'):
      setattr(self, attribute_name, None)
class WinEvtxParser(interface.FileObjectParser):
  """Parses Windows XML EventLog (EVTX) files."""
  _INITIAL_FILE_OFFSET = None
  NAME = 'winevtx'
  DATA_FORMAT = 'Windows XML EventLog (EVTX) file'
  def _GetEventDataFromRecord(
      self, parser_mediator, record_index, evtx_record, recovered=False):
    """Extract data from a Windows XML EventLog (EVTX) record.

    Per-field pyevtx accessors can raise OverflowError on malformed values;
    each one is read inside its own try-except so a single bad field is
    reported as a warning without dropping the whole record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      record_index (int): event record index.
      evtx_record (pyevtx.record): event record.
      recovered (Optional[bool]): True if the record was recovered.

    Returns:
      WinEvtxRecordEventData: event data.
    """
    event_data = WinEvtxRecordEventData()
    try:
      event_data.record_number = evtx_record.identifier
    except OverflowError as exception:
      warning_message = (
          'unable to read record identifier from event record: {0:d} '
          'with error: {1!s}').format(record_index, exception)
      if recovered:
        parser_mediator.ProduceRecoveryWarning(warning_message)
      else:
        parser_mediator.ProduceExtractionWarning(warning_message)
    try:
      event_identifier = evtx_record.event_identifier
    except OverflowError as exception:
      warning_message = (
          'unable to read event identifier from event record: {0:d} '
          'with error: {1!s}').format(record_index, exception)
      if recovered:
        parser_mediator.ProduceRecoveryWarning(warning_message)
      else:
        parser_mediator.ProduceExtractionWarning(warning_message)
      event_identifier = None
    try:
      event_identifier_qualifiers = evtx_record.event_identifier_qualifiers
    except OverflowError as exception:
      warning_message = (
          'unable to read event identifier qualifiers from event record: '
          '{0:d} with error: {1!s}').format(record_index, exception)
      if recovered:
        parser_mediator.ProduceRecoveryWarning(warning_message)
      else:
        parser_mediator.ProduceExtractionWarning(warning_message)
      event_identifier_qualifiers = None
    event_data.offset = evtx_record.offset
    event_data.recovered = recovered
    if event_identifier is not None:
      event_data.event_identifier = event_identifier
      event_data.message_identifier = event_identifier
      # The qualifiers form the upper 16 bits of the message identifier.
      if event_identifier_qualifiers is not None:
        event_data.message_identifier |= event_identifier_qualifiers << 16
    # Normalize the provider GUID to lower case for consistent lookups.
    if evtx_record.provider_identifier:
      event_data.provider_identifier = evtx_record.provider_identifier.lower()
    event_data.event_level = evtx_record.event_level
    event_data.event_version = evtx_record.event_version
    event_data.source_name = evtx_record.source_name
    # Computer name is the value stored in the event record and does not
    # necessarily correspond with the actual hostname.
    event_data.computer_name = evtx_record.computer_name
    event_data.user_sid = evtx_record.user_security_identifier
    event_data.strings = list(evtx_record.strings)
    event_data.xml_string = evtx_record.xml_string
    return event_data
  def _ParseRecord(
      self, parser_mediator, record_index, evtx_record, recovered=False):
    """Extract data from a Windows XML EventLog (EVTX) record.

    Reads the creation and written timestamps (each guarded against
    OverflowError) and produces one event data container per record.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      record_index (int): event record index.
      evtx_record (pyevtx.record): event record.
      recovered (Optional[bool]): True if the record was recovered.
    """
    event_data = self._GetEventDataFromRecord(
        parser_mediator, record_index, evtx_record, recovered=recovered)
    try:
      creation_time = evtx_record.get_creation_time_as_integer()
    except OverflowError as exception:
      warning_message = (
          'unable to read creation time from event record: {0:d} '
          'with error: {1!s}').format(record_index, exception)
      if recovered:
        parser_mediator.ProduceRecoveryWarning(warning_message)
      else:
        parser_mediator.ProduceExtractionWarning(warning_message)
      creation_time = None
    if creation_time:
      event_data.creation_time = dfdatetime_filetime.Filetime(
          timestamp=creation_time)
    try:
      written_time = evtx_record.get_written_time_as_integer()
    except OverflowError as exception:
      warning_message = (
          'unable to read written time from event record: {0:d} '
          'with error: {1!s}').format(record_index, exception)
      if recovered:
        parser_mediator.ProduceRecoveryWarning(warning_message)
      else:
        parser_mediator.ProduceExtractionWarning(warning_message)
      written_time = None
    if written_time:
      event_data.written_time = dfdatetime_filetime.Filetime(
          timestamp=written_time)
    parser_mediator.ProduceEventData(event_data)
  def _ParseRecords(self, parser_mediator, evtx_file):
    """Parses Windows XML EventLog (EVTX) records.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      evtx_file (pyevtx.file): Windows XML EventLog (EVTX) file.
    """
    # To handle errors when parsing a Windows XML EventLog (EVTX) file in the
    # most granular way the following code iterates over every event record.
    # The call to evtx_file.get_record() and access to members of evtx_record
    # should be called within a try-except.
    for record_index in range(evtx_file.number_of_records):
      if parser_mediator.abort:
        break
      try:
        evtx_record = evtx_file.get_record(record_index)
        self._ParseRecord(parser_mediator, record_index, evtx_record)
      except IOError as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to parse event record: {0:d} with error: {1!s}'.format(
                record_index, exception))
    # Recovered (deleted but still present) records are parsed separately and
    # reported via recovery warnings.
    for record_index in range(evtx_file.number_of_recovered_records):
      if parser_mediator.abort:
        break
      try:
        evtx_record = evtx_file.get_recovered_record(record_index)
        self._ParseRecord(
            parser_mediator, record_index, evtx_record, recovered=True)
      except IOError as exception:
        parser_mediator.ProduceRecoveryWarning((
            'unable to parse recovered event record: {0:d} with error: '
            '{1!s}').format(record_index, exception))
  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)
    # EVTX files start with the "ElfFile\x00" signature.
    format_specification.AddNewSignature(b'ElfFile\x00', offset=0)
    return format_specification
  def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Windows XML EventLog (EVTX) file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): a file-like object.
    """
    evtx_file = pyevtx.file()
    evtx_file.set_ascii_codepage(parser_mediator.codepage)
    try:
      evtx_file.open_file_object(file_object)
    except IOError as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to open file with error: {0!s}'.format(exception))
      return
    try:
      self._ParseRecords(parser_mediator, evtx_file)
    finally:
      # Always close the pyevtx file handle, even when parsing aborts.
      evtx_file.close()
# Register the parser with plaso's parser manager under its NAME.
manager.ParsersManager.RegisterParser(WinEvtxParser)
| {
"content_hash": "fc5d828fbca4a2f353fe9c007fe63f89",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 78,
"avg_line_length": 35.21611721611722,
"alnum_prop": 0.6922196796338673,
"repo_name": "log2timeline/plaso",
"id": "64dfbe8a208cc17cf6a2d33cb8b777edacc93616",
"size": "9638",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "plaso/parsers/winevtx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345186"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
} |
import chainer
import copy
class _MultiNodeOptimizer(object):
    """Transparent wrapper that synchronizes gradients across workers.

    Attribute reads/writes are delegated to ``actual_optimizer`` through
    ``__getattr__``/``__setattr__``, so the wrapper can be used anywhere
    the wrapped Chainer optimizer is expected.  State that must live on
    the wrapper itself is therefore stored with ``super().__setattr__``
    to bypass the delegating ``__setattr__``.
    """
    def __init__(self, actual_optimizer, communicator, zero_fill):
        # object.__setattr__ is used on purpose: a plain assignment would
        # be forwarded to actual_optimizer by the overridden __setattr__.
        super(_MultiNodeOptimizer, self).__setattr__(
            'communicator', communicator)
        super(_MultiNodeOptimizer, self).__setattr__(
            'actual_optimizer', actual_optimizer)
        super(_MultiNodeOptimizer, self).__setattr__(
            'target_params', [])
        super(_MultiNodeOptimizer, self).__setattr__(
            'zero_fill', zero_fill)
    def update(self, lossfun=None, *args, **kwds):
        """Computes gradients (when lossfun is given), synchronizes them
        across workers, and applies the wrapped optimizer's update."""
        target = self.target
        if lossfun is not None:
            # Same protocol as chainer.Optimizer.update: forward pass,
            # clear (or zero) gradients, then backward pass.
            use_cleargrads = getattr(self, '_use_cleargrads', True)
            loss = lossfun(*args, **kwds)
            if use_cleargrads:
                target.cleargrads()
            else:
                target.zerograds()
            loss.backward(loss_scale=self.actual_optimizer._loss_scale)
            del loss
        if self.is_changed(target):
            # The parameter set changed (e.g. first iteration or lazily
            # initialized parameters): broadcast parameters instead of
            # averaging gradients that are not yet consistent.
            self.communicator.bcast_data(target)
        else:
            # Average gradients over all workers, then update locally.
            self.communicator.multi_node_mean_grad(target, self.zero_fill)
            self.actual_optimizer.update(None, *args, **kwds)
    def is_changed(self, target):
        """Returns True if target's (name, is-initialized) parameter list
        differs from the previous call; records the current list as a
        side effect."""
        previous_params = self.target_params
        super(_MultiNodeOptimizer, self).__setattr__(
            'target_params', [(name, param.data is not None)
                              for name, param in sorted(target.namedparams())])
        if len(previous_params) != len(self.target_params):
            return True
        for param1, param2 in zip(self.target_params, previous_params):
            if (param1[0] != param2[0]) or param1[1] != param2[1]:
                return True
        return False
    def setup(self, link):
        # Delegate to the wrapped optimizer but return self so callers
        # keep holding the wrapper.
        self.actual_optimizer.setup(link)
        return self
    def __getattr__(self, attr_name):
        # Fall back to the wrapped optimizer for unknown attributes.
        return getattr(self.actual_optimizer, attr_name)
    def __setattr__(self, attr_name, value):
        # Forward all attribute writes to the wrapped optimizer.
        setattr(self.actual_optimizer, attr_name, value)
class _DoubleBufferingOptimizer(object):
    """Wrapper that overlaps gradient all-reduce with compute.

    Keeps a deep copy of the model (``communicated_target``) whose
    gradients are all-reduced asynchronously on a dedicated CUDA stream
    while the next forward/backward runs; the update therefore uses
    gradients from the previous iteration.  Like ``_MultiNodeOptimizer``,
    attribute access is delegated to ``actual_optimizer``, so wrapper
    state is stored via ``super().__setattr__``.
    """
    def __init__(self, actual_optimizer, communicator, zero_fill):
        # object.__setattr__ bypasses the delegating __setattr__ below.
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'communicator', communicator)
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'actual_optimizer', actual_optimizer)
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'needs_update', False)
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'communicated_target', None)
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'target_params_list', [[], []])
        # Dedicated stream so the all-reduce can overlap compute on the
        # null stream.
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'allreduce_grad_stream', chainer.cuda.Stream(non_blocking=True))
        super(_DoubleBufferingOptimizer, self).__setattr__(
            'zero_fill', zero_fill)
    def update(self, lossfun=None, *args, **kwds):
        """Computes gradients, kicks off the asynchronous all-reduce of the
        previous gradients, and applies the update one iteration behind."""
        target = self.target
        if lossfun is not None:
            # Same protocol as chainer.Optimizer.update.
            use_cleargrads = getattr(self, '_use_cleargrads', True)
            loss = lossfun(*args, **kwds)
            if use_cleargrads:
                target.cleargrads()
            else:
                target.zerograds()
            loss.backward(loss_scale=self.actual_optimizer._loss_scale)
            del loss
        if self.is_changed(target, self.target_params_list[0]):
            # Parameter set changed: resynchronize by broadcasting, rebuild
            # the communication copy and restart the pipeline.
            self.wait()
            self.communicator.bcast_data(target)
            super(_DoubleBufferingOptimizer, self).__setattr__(
                'communicated_target', copy.deepcopy(target))
            super(_DoubleBufferingOptimizer, self).__setattr__(
                'target_params_list', [
                    list(sorted(self.target.namedparams())),
                    list(sorted(self.communicated_target.namedparams()))])
            super(_DoubleBufferingOptimizer, self).__setattr__(
                'needs_update', False)
        else:
            # Wait for the previous all-reduce, exchange the freshly
            # computed gradients with the averaged ones, then launch the
            # next asynchronous all-reduce.
            self.wait()
            self.swap_grad(self.target_params_list[0],
                           self.target_params_list[1])
            self.multi_node_mean_grad_async()
            # Skip the very first update: no averaged gradients exist yet.
            if self.needs_update:
                self.actual_optimizer.update(None, *args, **kwds)
            else:
                super(_DoubleBufferingOptimizer, self).__setattr__(
                    'needs_update', True)
    def multi_node_mean_grad_async(self):
        # All-reduce the communication copy's gradients on the side stream.
        self.communicator._multi_node_mean_grad_async(
            self.communicated_target, self.zero_fill,
            self.allreduce_grad_stream)
    def is_changed(self, target, previous_params):
        """Returns True if target's sorted parameter list (names or
        initialization state) differs from previous_params."""
        target_params = list(sorted(target.namedparams()))
        if len(previous_params) != len(target_params):
            return True
        for param1, param2 in zip(target_params, previous_params):
            name1, var1 = param1
            name2, var2 = param2
            if (name1 != name2) or (var1.data is None) != (var2.data is None):
                return True
        return False
    def swap_grad(self, target1_params, target2_params):
        # Exchange gradient arrays between the live model and the
        # communication copy, pairwise by position.
        for param1, param2 in zip(target1_params, target2_params):
            _, var1 = param1
            _, var2 = param2
            var1.grad, var2.grad = var2.grad, var1.grad
    def wait(self):
        # Block until both the all-reduce stream and the default stream
        # have drained, so gradient buffers are safe to touch.
        self.allreduce_grad_stream.synchronize()
        chainer.cuda.Stream.null.synchronize()
    def setup(self, link):
        # Delegate to the wrapped optimizer but return self (the wrapper).
        self.actual_optimizer.setup(link)
        return self
    def __getattr__(self, attr_name):
        # Fall back to the wrapped optimizer for unknown attributes.
        return getattr(self.actual_optimizer, attr_name)
    def __setattr__(self, attr_name, value):
        # Forward all attribute writes to the wrapped optimizer.
        setattr(self.actual_optimizer, attr_name, value)
def create_multi_node_optimizer(actual_optimizer, communicator,
                                double_buffering=False, zero_fill=True):
    """Create a multi node optimizer from a Chainer optimizer.

    Args:
        actual_optimizer: Chainer optimizer
            (e.g., ``chainer.optimizers.Adam``).
        communicator: ChainerMN communicator.
        double_buffering: If ``True``, all-reduce and other
            processing (such as forward and backward) are
            overlapped using double buffering.
            There are cases where accuracy is affected because
            the gradients of the previous iteration are used
            for update. This flag is supported by
            ``PureNcclCommunicator`` only.
        zero_fill: A knob to control whether to fill gradients of initialized
            and unused Link (which is None internally) with zero-valued array,
            because the all gradients must be an array among processes for
            performing all-reduce, which might be an array or None after
            backward computation. Gradients of uninitialized Link are skipped.
            If it is False, gradients of unused Link are just skipped.
    Returns:
        The multi node optimizer based on ``actual_optimizer``.
    """
    # Common case first: plain synchronous gradient averaging.
    if not double_buffering:
        return _MultiNodeOptimizer(actual_optimizer, communicator,
                                   zero_fill)

    # Imported lazily so the NCCL backend is only required when double
    # buffering is actually requested.
    from chainermn.communicators.pure_nccl_communicator \
        import PureNcclCommunicator

    if not isinstance(communicator, PureNcclCommunicator):
        raise ValueError(
            'This communicator does not support double buffering.')

    return _DoubleBufferingOptimizer(actual_optimizer, communicator,
                                     zero_fill)
| {
"content_hash": "3d5c0f752ce45c341863f23fc7399484",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.5954814416352878,
"repo_name": "chainer/chainer",
"id": "68cde0a0c2d841fa02d252966c6a239dd0d4d762",
"size": "7436",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "chainermn/optimizers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3805"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6102"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6431941"
},
{
"name": "Shell",
"bytes": "50151"
}
],
"symlink_target": ""
} |
"""
Tests for metricsT.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
import deepchem as dc
from tensorflow.python.platform import googletest
from deepchem import metrics
class MetricsTest(googletest.TestCase):
  """Sanity checks for deepchem metric helpers."""

  def test_kappa_score(self):
    labels = [1, 0, 1, 0]
    scores = [0.8, 0.2, 0.3, 0.4]  # thresholded at 0.5 -> [1, 0, 0, 0]
    kappa = dc.metrics.kappa_score(labels, np.greater(scores, 0.5))
    # Three of the four thresholded predictions agree with the labels.
    observed = 3.0 / 4.0
    chance = ((2 * 1) + (2 * 3)) / 4.0**2
    expected = np.true_divide(observed - chance, 1.0 - chance)
    self.assertAlmostEqual(kappa, expected)

  def test_r2_score(self):
    """R^2 via the Metric wrapper matches the direct computation."""
    np.random.seed(123)
    sample_count = 10
    labels = np.random.rand(sample_count,)
    predictions = np.random.rand(sample_count,)
    metric = dc.metrics.Metric(dc.metrics.r2_score)
    direct = dc.metrics.r2_score(labels, predictions)
    via_wrapper = metric.compute_metric(labels, predictions)
    assert np.isclose(direct, via_wrapper)

  def test_one_hot(self):
    labels = np.array([0, 0, 1, 0, 1, 1, 0])
    encoded = metrics.to_one_hot(labels)
    expected = np.array(
        [[1, 0], [1, 0], [0, 1], [1, 0], [0, 1], [0, 1], [1, 0]])
    decoded = metrics.from_one_hot(encoded)
    assert np.array_equal(expected, encoded)
    assert np.array_equal(labels, decoded)

  def test_bedroc_score(self):
    num_actives = 20
    num_total = 400
    actives = np.ones(num_actives)
    inactives = np.zeros(num_total - num_actives)
    y_true = np.concatenate([actives, inactives])

    # Perfect ranking scores 1.0.
    y_pred_best = dc.metrics.to_one_hot(np.concatenate([actives, inactives]))
    best_score = dc.metrics.bedroc_score(y_true, y_pred_best)
    self.assertAlmostEqual(best_score, 1.0)

    # Fully inverted ranking scores ~0.0.
    y_pred_worst = dc.metrics.to_one_hot(
        np.concatenate([np.zeros(num_actives),
                        np.ones(num_total - num_actives)]))
    worst_score = dc.metrics.bedroc_score(y_true, y_pred_worst)
    self.assertAlmostEqual(worst_score, 0.0, 4)
# Allow running this test module directly; delegates discovery to googletest.
if __name__ == '__main__':
  googletest.main()
| {
"content_hash": "1cb16169f1229b94fb9d6431a0ae1884",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 33.513513513513516,
"alnum_prop": 0.6193548387096774,
"repo_name": "ktaneishi/deepchem",
"id": "0d355fd8e4b7472538837a399ab109b5dd69edad",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepchem/metrics/tests/metrics_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2553147"
},
{
"name": "Shell",
"bytes": "11547"
}
],
"symlink_target": ""
} |
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE: sys.path entries must be individual strings. The previous code used
# sys.path.append(<list>), which inserted the whole list as a single (useless)
# entry, so none of these paths were actually importable.
sys.path.extend([os.path.abspath('../keystone'),
                 os.path.abspath('..'),
                 os.path.abspath('../bin')
                 ])
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.intersphinx',
              'sphinx.ext.pngmath',
              'sphinx.ext.graphviz',
              'sphinx.ext.todo']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# Hudson CI publishes the docs with an extra (Google Analytics) template dir.
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Keystone'
copyright = u'2011-present, OpenStack, LLC.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from keystone import version as keystone_version
# NOTE(review): keystone_version is bound to the keystone.version *module*
# object, not a version string; Sphinx will coerce it via str(). Confirm
# whether a call like keystone_version.version() was intended instead.
# The full version, including alpha/beta/rc tags.
release = keystone_version
# The short X.Y version.
version = keystone_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystone.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
    ('man/keystonemanage', 'keystone-manage', u'Keystone Management Utility',
     [u'OpenStack'], 1)
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ["."]
html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'keystonedoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
  ('index', 'Keystone.tex', u'Keystone Documentation',
   u'Keystone Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
                       'nova': ('http://nova.openstack.org', None),
                       'swift': ('http://swift.openstack.org', None),
                       'glance': ('http://glance.openstack.org', None)}
| {
"content_hash": "d1ac9649864afa485f49426ace520d25",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 79,
"avg_line_length": 32.91705069124424,
"alnum_prop": 0.6875262494750105,
"repo_name": "ntt-pf-lab/backup_keystone",
"id": "0fa9eb441041301c721ef54671c81c890886a8e5",
"size": "8151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67937"
},
{
"name": "Python",
"bytes": "610154"
},
{
"name": "Shell",
"bytes": "4547"
}
],
"symlink_target": ""
} |
from pkg_resources import require
require('numpy')
require('matplotlib')
import sys, os
import matplotlib as mpl
from pylab import *
if len(sys.argv) < 1:
print "### ERROR: not enough arguments. Please supply an input file ###"
fptr = open(sys.argv[1])
datafile=fptr.readline().strip()
dataset=fptr.readline().strip()
config=""
result=""
scans = []
datasetsize = None
algorithm = None
for line in fptr.readlines():
if not datasetsize and line.startswith(' Dataset='):
d = dict( [kv.strip().split('=') for kv in line.split('\t')])
datasetsize = double(d['Dataset'].rstrip('MB'))
if line.startswith('CONFIG:'): config=line
elif line.startswith('RESULT:'):
result = line
# Mash all the config and result key-value pairs into a dictionary
d = dict( [kv.strip().split('=') for kv in result.split('\t')[1:]] )
d.update( dict([kv.strip().split('=') for kv in config.split('\t')[1:]] ))
#scans.append( d )
if not algorithm:
algorithm = d['algo']
scans.append( (d['threads'], int(d['level']), double(d['Ratio']), double(d['Datarate'].rstrip('MB/s'))) )
fptr.close()
# Analyse and plot results
#for s in scans: print s
#scans = array(scans)
#print scans.shape
#scans = scans.reshape((2, 10, 3))
#print scans
results = dict()
for thread,level,ratio, datarate in scans:
if not results.has_key(thread):
results.update({thread: [(level,ratio, datarate)]})
else:
results[thread].append((level,ratio,datarate))
legends=[]
markers= [ 's', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',' ]
for i,threadcount in enumerate(results):
#print zip(*results[threadcount])
legends.append('%s threads'%threadcount)
xy = zip(*results[threadcount])
x=xy[1]
y=xy[2]
plot(x,y, marker=markers[i])
plot([1,5],[1280, 1280*6], linestyle='-.', color='black')
ylim(0,8000)
xlim(1,None)
text(1.1, 1280+50, "10 GigE")
axhline(1280, linewidth=3, linestyle='-.', color='black')
text(0.5, 10000, "dataset: %.2fGB\nalgorithm: %s"%(datasetsize/1024., algorithm ))
title(datafile)
legend(legends, loc='best')
xlabel('Compresssion ratio')
ylabel('Speed (MB/s)')
grid(True)
show()
| {
"content_hash": "2304e5f0d814719b46db7d55f36741bc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 113,
"avg_line_length": 27.21951219512195,
"alnum_prop": 0.6160394265232975,
"repo_name": "ulrikpedersen/benchpress",
"id": "d7b4b1449b55bb9b283b1cb40f73648fbab265ad",
"size": "2254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot-result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "10080"
},
{
"name": "Python",
"bytes": "2254"
},
{
"name": "Shell",
"bytes": "3085"
}
],
"symlink_target": ""
} |
"""
This script converts qcarchive json to cjson
It includes automatic bond detection, since qcarchive
json does not store bonds.
"""
import json
import click
from avogadro.core import Molecule
from avogadro.io import FileFormatManager
from openbabel import OBMol, OBConversion
# Copied from mongochemserver
def avo_convert_str(str_data, in_format, out_format):
    """Convert chemical data between text formats using Avogadro."""
    manager = FileFormatManager()
    molecule = Molecule()
    manager.read_string(molecule, str_data, in_format)
    return manager.write_string(molecule, out_format)
# Copied from mongochemserver
def cjson_to_ob_molecule(cjson):
    """Build an Open Babel molecule from a cjson dict, going via SDF."""
    sdf_text = avo_convert_str(json.dumps(cjson), 'cjson', 'sdf')
    converter = OBConversion()
    converter.SetInFormat('sdf')
    converter.SetOutFormat('sdf')
    molecule = OBMol()
    converter.ReadString(molecule, sdf_text)
    return molecule
# Copied from mongochemserver
def autodetect_bonds(cjson):
    """Return a new cjson dict with bonds perceived by Open Babel."""
    molecule = cjson_to_ob_molecule(cjson)
    molecule.ConnectTheDots()
    molecule.PerceiveBondOrders()
    converter = OBConversion()
    converter.SetInFormat('sdf')
    converter.SetOutFormat('sdf')
    sdf_text = converter.WriteString(molecule)
    return json.loads(avo_convert_str(sdf_text, 'sdf', 'cjson'))
# Conversion factor from Bohr (atomic units) to Angstrom.
_BOHR_TO_ANGSTROM = 0.529177249


def convert_to_cjson(qcjson):
    """Convert a QCArchive JSON record to Chemical JSON (cjson).

    Args:
        qcjson: dict with 'geometry' (flat list of coordinates in atomic
            units) and 'atomic_numbers' keys. The input is not modified;
            the previous implementation scaled qcjson['geometry'] in place,
            silently corrupting the caller's data on reuse.

    Returns:
        dict: cjson with auto-detected bonds (qcarchive json stores none).
    """
    # The qcjson geometry is in atomic units; cjson expects Angstrom.
    coords = [value * _BOHR_TO_ANGSTROM for value in qcjson['geometry']]
    cjson = {
        'chemicalJson': 1,
        'atoms': {
            'coords': {
                '3d': coords
            },
            'elements': {
                'number': qcjson['atomic_numbers']
            }
        }
    }
    # Auto-detect bonds, since qcjson does not store them.
    return autodetect_bonds(cjson)
@click.command()
@click.argument('input_file', type=click.File('r'))
@click.argument('output_file', type=click.File('w'))
def main(input_file, output_file):
    """Read QCArchive JSON from input_file and write cjson to output_file."""
    qc_record = json.load(input_file)
    json.dump(convert_to_cjson(qc_record), output_file)


if __name__ == '__main__':
    main()
| {
"content_hash": "7d948b74d21fc36fef1b57aa7a5312b0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 66,
"avg_line_length": 23.952380952380953,
"alnum_prop": 0.658051689860835,
"repo_name": "OpenChemistry/mongochemdeploy",
"id": "d9697872d292f168b99535d9f46ebbe287e99fd2",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingest/qcarchive/convert_to_cjson.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "6863"
},
{
"name": "Jupyter Notebook",
"bytes": "3091"
},
{
"name": "Python",
"bytes": "34946"
},
{
"name": "Shell",
"bytes": "5786"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def settwopanel(height_ratios=None,
                width_ratios=None,
                padding=None,
                setoffset=None,
                figsize=None):
    """Create a figure with a main upper panel and a narrow lower panel.

    The lower panel is intended for differential information (e.g.
    residuals) about quantities over-plotted in the upper panel.

    args:
        height_ratios: list of floats, optional, defaults to [1.0, 0.3]
            height ratio between the upper and lower panel
        width_ratios: list of floats, optional, defaults to [1.0, 0.0]
            width ratio between the left and right panel
        padding: unused; retained for backward compatibility
        setoffset: unused; retained for backward compatibility
        figsize: tuple, optional
            forwarded to ``plt.figure``

    returns:
        figure object, ax0 (axes for top panel), and ax1
        (axes for lower panel)

    usage:
        >>> myfig, myax0, myax1 = settwopanel()
        >>> myax0.plot(x, y)
        >>> myax1.plot(x, x)
        >>> myfig.tight_layout()

    status:
        tested by R. Biswas, Fri Feb 21 00:52:55 CST 2014
    """
    import matplotlib.ticker as ticker

    # None-sentinels instead of mutable list defaults (shared-default pitfall).
    if height_ratios is None:
        height_ratios = [1.0, 0.3]
    if width_ratios is None:
        width_ratios = [1., 0.]

    majorformatter = ticker.ScalarFormatter(useOffset=False)

    # `is None` rather than `== None`: identity test is the correct idiom.
    if figsize is None:
        fig = plt.figure()
    else:
        fig = plt.figure(figsize=figsize)

    gs = gridspec.GridSpec(2, 1, width_ratios=width_ratios,
                           height_ratios=height_ratios)
    ax0 = plt.subplot(gs[0])
    ax1 = plt.subplot(gs[1])

    # Hide the upper panel's x tick labels; the lower panel carries them.
    ax0.set_xticklabels("", visible=False)
    ax1.yaxis.set_major_formatter(majorformatter)

    return fig, ax0, ax1
if __name__ == "__main__":
    # Demo: quadratic curve in the main panel, identity line below.
    xs = np.arange(0, 10, 0.1)
    fig, top_ax, bottom_ax = settwopanel()
    top_ax.plot(xs, xs * xs)
    bottom_ax.plot(xs, xs)
    fig.tight_layout()
    plt.show()
| {
"content_hash": "3f8b683f57b22949e3b2f4a906cf91bc",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 92,
"avg_line_length": 21.25,
"alnum_prop": 0.6569659442724458,
"repo_name": "rbiswas4/FluctuationsInCosmology",
"id": "3a369fcbc9cf0d2ffe12ca00a991b854bc9b9a33",
"size": "1638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plotutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101379"
}
],
"symlink_target": ""
} |
"""Build App Engine source package.
"""
import json
import optparse
import os
import shutil
import subprocess
import sys
#import requests
import test_file_herder
USAGE = """%prog src_path dest_path
Build the GAE source code package.
src_path Path to the source code root directory.
dest_path Path to the root directory to push/deploy GAE from."""
def call_cmd_and_return_output_lines(cmd):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = process.communicate()[0]
return output.split('\n')
except OSError as e:
print str(e)
return []
def build_version_info_file(dest_path):
  """Build the version info JSON file."""
  # Keys default to None so missing git output is visible downstream.
  version_info = {
    'gitHash': None,
    'time': None,
    'branch': None
  }
  # Parse `git log -1`: the "commit <hash>" line gives the hash, the
  # "Date: <timestamp>" line gives the commit time.
  lines = call_cmd_and_return_output_lines(['git', 'log', '-1'])
  for line in lines:
    if line.startswith('commit'):
      version_info['gitHash'] = line.partition(' ')[2].strip()
    elif line.startswith('Date'):
      # partition(':') splits at the first colon (the one after "Date"),
      # so colons inside the timestamp itself are preserved.
      version_info['time'] = line.partition(':')[2].strip()
    # Stop early once both fields have been found.
    if version_info['gitHash'] is not None and version_info['time'] is not None:
      break
  # The current branch is the `git branch` line prefixed with '*'.
  lines = call_cmd_and_return_output_lines(['git', 'branch'])
  for line in lines:
    if line.startswith('*'):
      version_info['branch'] = line.partition(' ')[2].strip()
      break
  # Best-effort write: an unwritable destination is reported, not fatal.
  try:
    with open(dest_path, 'w') as f:
      f.write(json.dumps(version_info))
  except IOError as e:
    print str(e)
# Copy pako zlib from node_modules to third_party/pako
def copyPako(dest_path):
  """Copy the minified pako library into <dest_path>/third_party/pako."""
  target_dir = os.path.join(dest_path, 'third_party', 'pako')
  os.makedirs(target_dir)
  shutil.copy('node_modules/pako/dist/pako.min.js', target_dir)
def CopyApprtcSource(src_path, dest_path):
  """Assemble a deployable GAE source tree at dest_path.

  dest_path is wiped and recreated. Each directory kind is handled
  differently: some subtrees are copied wholesale, html is split between
  templates (root) and html/, app_engine python/yaml files go to the root,
  and selected js files are copied as-is.
  """
  if os.path.exists(dest_path):
    shutil.rmtree(dest_path)
  os.makedirs(dest_path)
  # Subtrees copied verbatim into dest_path under the same name.
  simply_copy_subdirs = ['bigquery', 'css', 'images', 'third_party']
  for dirpath, unused_dirnames, files in os.walk(src_path):
    for subdir in simply_copy_subdirs:
      if dirpath.endswith(subdir):
        shutil.copytree(dirpath, os.path.join(dest_path, subdir))
    if dirpath.endswith('html'):
      dest_html_path = os.path.join(dest_path, 'html')
      os.makedirs(dest_html_path)
      for name in files:
        # Template files must be in the root directory.
        if name.endswith('_template.html'):
          shutil.copy(os.path.join(dirpath, name), dest_path)
        else:
          shutil.copy(os.path.join(dirpath, name), dest_html_path)
    elif dirpath.endswith('app_engine'):
      # Python sources (excluding tests) and yaml config go to the root.
      for name in files:
        if (name.endswith('.py') and 'test' not in name
            or name.endswith('.yaml')):
          shutil.copy(os.path.join(dirpath, name), dest_path)
    elif dirpath.endswith('js'):
      for name in files:
        # loopback.js is not compiled by Closure
        # and need to be copied separately.
        if name in ['loopback.js']:
          dest_js_path = os.path.join(dest_path, 'js')
          if not os.path.exists(dest_js_path):
            os.makedirs(dest_js_path)
          shutil.copy(os.path.join(dirpath, name), dest_js_path)
  # Record the git hash/branch/time of this build alongside the sources.
  build_version_info_file(os.path.join(dest_path, 'version_info.json'))
def main():
  """Parse arguments and build the App Engine source package."""
  parser = optparse.OptionParser(USAGE)
  parser.add_option("-t", "--include-tests", action="store_true",
                    help='Also copy python tests to the out dir.')
  options, args = parser.parse_args()

  if len(args) != 2:
    parser.error('Error: Exactly 2 arguments required.')
  src_path, dest_path = args[0:2]
  CopyApprtcSource(src_path, dest_path)
  #copyPako(dest_path)
  if options.include_tests:
    # Fix: app_engine_code was previously computed but unused while the
    # identical join was repeated inline in the call below.
    app_engine_code = os.path.join(src_path, 'app_engine')
    test_file_herder.CopyTests(app_engine_code, dest_path)
# Script entry point: exit status propagates from main().
if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "a23008c4990b99d0ad6b870a5831b48f",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 80,
"avg_line_length": 30.524193548387096,
"alnum_prop": 0.6446499339498019,
"repo_name": "webrtc/apprtc",
"id": "05852fa3bbee7a95be2c25d5e0a20b8e1bd7d46a",
"size": "3804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/build_app_engine_package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6551"
},
{
"name": "Dockerfile",
"bytes": "4611"
},
{
"name": "Go",
"bytes": "38382"
},
{
"name": "HTML",
"bytes": "21581"
},
{
"name": "JavaScript",
"bytes": "176875"
},
{
"name": "Python",
"bytes": "98405"
},
{
"name": "Shell",
"bytes": "2263"
}
],
"symlink_target": ""
} |
import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
    'shape': [(2, 3), ()],
    'is_variable': [True, False],
    'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLogNormal(testing.distribution_unittest):
    """Checks distributions.LogNormal against scipy.stats.lognorm."""

    scipy_onebyone = True

    def setUp_configure(self):
        from scipy import stats
        self.dist = distributions.LogNormal
        self.scipy_dist = stats.lognorm

        self.test_targets = {
            "batch_shape", "entropy", "event_shape", "log_prob", "mean",
            "sample", "support", "variance"}

        # RNG draws kept in the original order so fix_random stays stable.
        loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        log_scale = numpy.random.uniform(-1, 0, self.shape)
        scale = numpy.exp(log_scale).astype(numpy.float32)
        mu = utils.force_array(loc)
        sigma = utils.force_array(scale)
        self.params = {"mu": mu, "sigma": sigma}
        # scipy parameterizes lognorm by shape s=sigma and scale=exp(mu).
        self.scipy_params = {"s": sigma, "scale": numpy.exp(mu)}

        self.support = 'positive'

    def sample_for_test(self):
        return numpy.random.lognormal(
            size=self.sample_shape + self.shape).astype(numpy.float32)
# Register and run this module's tests under Chainer's testing harness.
testing.run_module(__name__, __file__)
| {
"content_hash": "f98df3996233c8f01bdcee9dac7df181",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 29.25581395348837,
"alnum_prop": 0.615262321144674,
"repo_name": "ktnyt/chainer",
"id": "96ca79a07b6f9817828888eaff24e0461a36d3b5",
"size": "1258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/distributions_tests/test_log_normal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1440363"
},
{
"name": "CMake",
"bytes": "42822"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5128330"
},
{
"name": "Shell",
"bytes": "19475"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django import http
from django.core.urlresolvers import resolve
# NOTE(review): this targets a legacy (Python 2 era) Django API —
# `http.get_host` and the py2 `raise Exception, "msg"` syntax below.
class SmartAppendSlashMiddleware(object):
    """
    "SmartAppendSlash" middleware for taking care of URL rewriting.

    This middleware appends a missing slash, if:
    * the SMART_APPEND_SLASH setting is True
    * the URL without the slash does not exist
    * the URL with an appended slash does exist.
    Otherwise it won't touch the URL.
    """

    def process_request(self, request):
        """
        Rewrite the URL based on settings.SMART_APPEND_SLASH

        Returns an HttpResponsePermanentRedirect when a slash is appended,
        otherwise None so the request continues down the middleware chain.
        """
        # Check for a redirect based on settings.SMART_APPEND_SLASH
        host = http.get_host(request)
        # old_url / new_url are [host, path] pairs; compare to detect changes.
        old_url = [host, request.path]
        new_url = old_url[:]
        # Append a slash if SMART_APPEND_SLASH is set and the resulting URL
        # resolves.
        if settings.SMART_APPEND_SLASH and (not old_url[1].endswith('/')) and not _resolves(old_url[1]) and _resolves(old_url[1] + '/'):
            new_url[1] = new_url[1] + '/'
            # POST bodies cannot survive a redirect, so fail loudly in DEBUG.
            if settings.DEBUG and request.method == 'POST':
                raise RuntimeError, "You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to %s%s (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings." % (new_url[0], new_url[1])
        # Only redirect when the URL actually changed.
        if new_url != old_url:
            # Redirect
            if new_url[0]:
                newurl = "%s://%s%s" % (request.is_secure() and 'https' or 'http', new_url[0], new_url[1])
            else:
                newurl = new_url[1]
            # Preserve the query string across the redirect.
            if request.GET:
                newurl += '?' + request.GET.urlencode()
            return http.HttpResponsePermanentRedirect(newurl)
        # Fall through: let later middleware / the view handle the request.
        return None
def _resolves(url):
    """Return True if *url* maps to a view via Django's URL resolver."""
    try:
        resolve(url)
    except http.Http404:
        return False
    return True
| {
"content_hash": "12cef92a1b699b4855701ea54a2e87b0",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 352,
"avg_line_length": 41.104166666666664,
"alnum_prop": 0.6188545362392296,
"repo_name": "hunch/hunch-sample-app",
"id": "47aa1027c48f6e15f2c98a191c686534b08288fd",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "114941"
},
{
"name": "Python",
"bytes": "4059962"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
from six.moves import range
import matplotlib.pyplot as plt
from striatum.storage import (
MemoryHistoryStorage,
MemoryModelStorage,
MemoryActionStorage,
Action,
)
from striatum.bandit import UCB1
from striatum import simulation
def main():
    """Run a UCB1 bandit simulation and plot the average regret."""
    dimension = 5
    actions = MemoryActionStorage()
    actions.add([Action(idx) for idx in range(5)])

    # Regret analysis over a fixed number of simulated rounds.
    rounds = 10000
    contexts, optimal = simulation.simulate_data(
        rounds, dimension, actions, random_state=1)
    policy = UCB1(MemoryHistoryStorage(), MemoryModelStorage(),
                  actions)

    for step in range(rounds):
        history_id, recommendation = policy.get_action(contexts[step])
        chosen = recommendation.action.id
        # Reward 1 when the policy picked the desired action, else 0.
        payoff = 1 if optimal[step] == chosen else 0
        policy.reward(history_id, {chosen: payoff})

    policy.plot_avg_regret()
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "2dac3facec91ce0b3ad77b5af255c1ff",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 26.794871794871796,
"alnum_prop": 0.6564593301435406,
"repo_name": "ntucllab/striatum",
"id": "c3cbc221e7dbfb9003d559e03408239b1a0e6acf",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation/simulation_ucb1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "97590"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sqlite3, zlib
import sys
#Implements https://github.com/mapbox/mbtiles-spec/blob/master/1.2/spec.md
class MBTiles(object):
    """Minimal MBTiles 1.2 reader/writer backed by sqlite3.

    Provides CRUD access to the ``metadata`` and ``tiles`` tables; the
    schema is created lazily on the first write.
    """

    def __init__(self, filename):
        """Open (or create) the MBTiles database at *filename*."""
        self.conn = sqlite3.connect(filename)
        self.c = self.conn.cursor()
        self.schemaReady = False  # schema is created lazily on first write

    def __del__(self):
        # Best-effort flush on garbage collection; callers should still
        # invoke Commit() explicitly.
        self.conn.commit()
        self.c.close()
        del self.conn

    def ListTiles(self):
        """Return all tiles as (zoom_level, tile_column, tile_row) tuples."""
        rows = self.c.execute("SELECT zoom_level, tile_column, tile_row FROM tiles")
        out = []
        for row in rows:
            out.append((row[0], row[1], row[2]))
        return out

    def GetTile(self, zoomLevel, tileColumn, tileRow):
        """Return the blob for one tile.

        Raises:
            RuntimeError: if no such tile exists.
        """
        rows = self.c.execute("SELECT tile_data FROM tiles WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?",
            (zoomLevel, tileColumn, tileRow))
        rows = list(rows)
        if len(rows) == 0:
            raise RuntimeError("Tile not found")
        return rows[0][0]

    def CheckSchema(self):
        """Create the metadata/tiles tables and the tile index if missing."""
        sql = "CREATE TABLE IF NOT EXISTS metadata (name text, value text)"
        self.c.execute(sql)

        sql = "CREATE TABLE IF NOT EXISTS tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob)"
        self.c.execute(sql)

        sql = "CREATE INDEX IF NOT EXISTS tiles_index ON tiles (zoom_level, tile_column, tile_row)"
        self.c.execute(sql)

        self.schemaReady = True

    def GetAllMetaData(self):
        """Return the metadata table as a {name: value} dict."""
        rows = self.c.execute("SELECT name, value FROM metadata")
        out = {}
        for row in rows:
            out[row[0]] = row[1]
        return out

    def SetMetaData(self, name, value):
        """Insert or update one metadata entry and commit."""
        if not self.schemaReady:
            self.CheckSchema()

        # Try UPDATE first; fall back to INSERT when no row matched.
        self.c.execute("UPDATE metadata SET value=? WHERE name=?", (value, name))
        if self.c.rowcount == 0:
            self.c.execute("INSERT INTO metadata (name, value) VALUES (?, ?);", (name, value))
        self.conn.commit()

    def DeleteMetaData(self, name):
        """Delete one metadata entry.

        Raises:
            RuntimeError: if the entry did not exist.
        """
        if not self.schemaReady:
            self.CheckSchema()

        self.c.execute("DELETE FROM metadata WHERE name = ?", (name,))
        self.conn.commit()
        if self.c.rowcount == 0:
            raise RuntimeError("Metadata name not found")

    def SetTile(self, zoomLevel, tileColumn, tileRow, data):
        """Insert or update one tile blob (not committed; see Commit())."""
        if not self.schemaReady:
            self.CheckSchema()

        self.c.execute("UPDATE tiles SET tile_data=? WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?",
            (data, zoomLevel, tileColumn, tileRow))
        if self.c.rowcount == 0:
            self.c.execute("INSERT INTO tiles (zoom_level, tile_column, tile_row, tile_data) VALUES (?, ?, ?, ?);",
                (zoomLevel, tileColumn, tileRow, data))

    def DeleteTile(self, zoomLevel, tileColumn, tileRow):
        """Delete one tile.

        Bug fix: the previous implementation referenced an undefined
        ``data`` variable and passed four parameters for three SQL
        placeholders, so every call raised NameError.

        Raises:
            RuntimeError: if the tile did not exist.
        """
        if not self.schemaReady:
            self.CheckSchema()

        self.c.execute("DELETE FROM tiles WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?",
            (zoomLevel, tileColumn, tileRow))
        self.conn.commit()
        if self.c.rowcount == 0:
            raise RuntimeError("Tile not found")

    def Commit(self):
        """Flush pending writes to the database file."""
        self.conn.commit()
if __name__ == "__main__":
    # Manual test / demo: dump the metadata table and extract one tile to
    # out.mvt. Defaults point at a sample Andorra extract.
    args = sys.argv
    fina = args[1] if len(args) > 1 else "andorra.mbtiles"
    zoom = int(args[2]) if len(args) > 2 else 12
    tilex = int(args[3]) if len(args) > 3 else 1936
    tiley = int(args[4]) if len(args) > 4 else 2779

    mbTiles = MBTiles(fina)

    for key, value in mbTiles.GetAllMetaData().items():
        print (key, value)

    try:
        data = mbTiles.GetTile(zoom, tilex, tiley)
    except RuntimeError as err:
        print (err)
        print ("The first 100 tiles:", mbTiles.ListTiles()[:100])
        exit(0)

    print ("compressed", len(data))
    # Tiles are gzip-compressed; 16+MAX_WBITS tells zlib to expect a gzip header.
    decData = zlib.decompress(data, 16 + zlib.MAX_WBITS)
    print ("uncompressed", len(decData))

    # Exercise the metadata write path.
    mbTiles.GetAllMetaData()
    mbTiles.SetMetaData("foo", "bar")
    mbTiles.DeleteMetaData("foo")

    with open("out.mvt", "wb") as fi:
        fi.write(data)
    print ("Saved out.mvt")
| {
"content_hash": "089e748e7d568f08340bd8017a197b92",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 118,
"avg_line_length": 27.45925925925926,
"alnum_prop": 0.6725114647963313,
"repo_name": "TimSC/pyMbTiles",
"id": "4ffd84c8f70f9dc598c3c6f1449e9a486ba5e1f9",
"size": "3707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MBTiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20013"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import unittest
from profile_creators.history_profile_extender import HistoryProfileExtender
from telemetry import decorators
from telemetry.testing import options_for_unittests
import mock # pylint: disable=import-error
# Testing private method.
# pylint: disable=protected-access
class HistoryProfileExtenderTest(unittest.TestCase):
  """End-to-end check of HistoryProfileExtender on desktop platforms."""

  # The profile extender does not work on Android or ChromeOS.
  @decorators.Disabled('android', 'chromeos')
  def testFullFunctionality(self):
    run_options = options_for_unittests.GetCopy()
    run_options.output_profile_path = tempfile.mkdtemp()
    extender = HistoryProfileExtender(run_options)

    # Stop the extender at the earliest possible opportunity.
    extender.ShouldExitAfterBatchNavigation = mock.MagicMock(return_value=True)
    # Normally, the number of tabs depends on the number of cores. Use a
    # static, small number to increase the speed of the test.
    extender._NUM_TABS = 3

    try:
      extender.Run()
      self.assertEquals(extender.profile_path, run_options.output_profile_path)
      self.assertTrue(os.path.exists(extender.profile_path))

      history_db = os.path.join(extender.profile_path, "Default", "History")
      self.assertGreater(os.stat(history_db).st_size, 1000)
    finally:
      shutil.rmtree(run_options.output_profile_path)
| {
"content_hash": "6dc349f1d337e8fb14e452d49ddabbb4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 36.578947368421055,
"alnum_prop": 0.7496402877697842,
"repo_name": "ltilve/ChromiumGStreamerBackend",
"id": "66de67fcff5611e7db4cde6f94014d6dc99fc19e",
"size": "1552",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/perf/profile_creators/history_profile_extender_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9568645"
},
{
"name": "C++",
"bytes": "246813997"
},
{
"name": "CSS",
"bytes": "943687"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27371019"
},
{
"name": "Java",
"bytes": "15348315"
},
{
"name": "JavaScript",
"bytes": "20872607"
},
{
"name": "Makefile",
"bytes": "70983"
},
{
"name": "Objective-C",
"bytes": "2029825"
},
{
"name": "Objective-C++",
"bytes": "10156554"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "182741"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "494625"
},
{
"name": "Python",
"bytes": "8594611"
},
{
"name": "Shell",
"bytes": "486464"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from db_edu_util import all_pred
from string import ascii_lowercase, ascii_uppercase
def test_all_pred():
    """all_pred(p, xs) is True iff predicate *p* holds for every element of *xs*.

    Uses identity checks (``is True`` / ``is False``) rather than the
    PEP 8-discouraged ``== True`` / ``== False`` comparisons, while still
    pinning the exact boolean return value.
    """
    # Numeric predicate: one failing element anywhere flips the result.
    assert all_pred(lambda x: x > 0, [10, 20, 30, 40]) is True
    assert all_pred(lambda x: x > 0, [0, 20, 30, 40]) is False
    assert all_pred(lambda x: x > 0, [20, 30, 0, 40]) is False
    # Character-membership predicate over strings.
    assert all_pred(lambda c: c in ascii_uppercase, ascii_uppercase) is True
    assert all_pred(lambda c: c in ascii_uppercase, ascii_lowercase) is False
    assert all_pred(lambda c: c in ascii_uppercase, "SADLFKJaLKJSDF") is False
| {
"content_hash": "2c46bc70e585e7d16af8620a332aa3c8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 48.27272727272727,
"alnum_prop": 0.664783427495292,
"repo_name": "databricks-edu/build-tooling",
"id": "499eb789df1de71906888bf068ac6ae5bed94ec9",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_edu_util/tests/test_all_pred.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1707"
},
{
"name": "Python",
"bytes": "394219"
},
{
"name": "Shell",
"bytes": "22711"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import json
import socket
import sys
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
# Socket of the currently connected client, or None when no client is attached.
# Set by ThreadedTCPRequestHandler.handle() and read by the stdin loop in main.
thesocket = None
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Serve one client connection: read UTF-8 JSON messages and reply.

    Each incoming message is expected to be a JSON array whose first
    element is a sequence number; positive sequence numbers get a JSON
    response echoed back on the same socket.
    """

    def handle(self):
        print("=== socket opened ===")
        global thesocket
        thesocket = self.request
        while True:
            try:
                payload = self.request.recv(4096).decode('utf-8')
            except socket.error:
                print("=== socket error ===")
                break
            except IOError:
                print("=== socket closed ===")
                break
            if not payload:
                # Empty read means the peer closed the connection.
                print("=== socket closed ===")
                break
            print("received: {0}".format(payload))
            try:
                message = json.loads(payload)
            except ValueError:
                print("json decoding failed")
                message = [-1, '']
            # Send a response if the sequence number is positive.
            # Negative numbers are used for "eval" responses.
            if message[0] >= 0:
                reply = "got it" if message[1] == 'hello!' else "what?"
                wire = json.dumps([message[0], reply])
                print("sending {0}".format(wire))
                self.request.sendall(wire.encode('utf-8'))
        thesocket = None
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each incoming connection in its own thread."""
    pass
if __name__ == "__main__":
    HOST, PORT = "localhost", 8765
    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    ip, port = server.server_address
    # Start a thread with the server -- that thread will then start one
    # more thread for each request
    server_thread = threading.Thread(target=server.serve_forever)
    # Exit the server thread when the main thread terminates
    server_thread.daemon = True
    server_thread.start()
    print("Server loop running in thread: ", server_thread.name)
    print("Listening on port {0}".format(PORT))
    # Interactive loop: forward each stdin line to the connected client
    # socket (if any) until the user types "quit".
    while True:
        typed = sys.stdin.readline()
        if "quit" in typed:
            print("Goodbye!")
            break
        if thesocket is None:
            print("No socket yet")
        else:
            print("sending {0}".format(typed))
            thesocket.sendall(typed.encode('utf-8'))
    server.shutdown()
    server.server_close()
| {
"content_hash": "a7b5ca4528c5f3b450e5f0b4d328dad2",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 30.511904761904763,
"alnum_prop": 0.5595005852516582,
"repo_name": "operepo/ope",
"id": "00fe8bfbca4f02e906716ce094df1cc47bc92520",
"size": "3106",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "client_tools/svc/rc/usr/share/vim/vim82/tools/demoserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AL",
"bytes": "40379"
},
{
"name": "Awk",
"bytes": "22377"
},
{
"name": "Batchfile",
"bytes": "81725"
},
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "200907"
},
{
"name": "CMake",
"bytes": "8149"
},
{
"name": "CSS",
"bytes": "103747"
},
{
"name": "Dockerfile",
"bytes": "47152"
},
{
"name": "Emacs Lisp",
"bytes": "90665"
},
{
"name": "HTML",
"bytes": "37373861"
},
{
"name": "Java",
"bytes": "916104"
},
{
"name": "JavaScript",
"bytes": "9115492"
},
{
"name": "Makefile",
"bytes": "7428"
},
{
"name": "NewLisp",
"bytes": "111955"
},
{
"name": "PHP",
"bytes": "5053"
},
{
"name": "Perl",
"bytes": "45839826"
},
{
"name": "PostScript",
"bytes": "192210"
},
{
"name": "PowerShell",
"bytes": "2870"
},
{
"name": "Procfile",
"bytes": "114"
},
{
"name": "Prolog",
"bytes": "248055"
},
{
"name": "Python",
"bytes": "9037346"
},
{
"name": "QML",
"bytes": "125647"
},
{
"name": "QMake",
"bytes": "7566"
},
{
"name": "Raku",
"bytes": "7174577"
},
{
"name": "Roff",
"bytes": "25148"
},
{
"name": "Ruby",
"bytes": "162111"
},
{
"name": "Shell",
"bytes": "2574077"
},
{
"name": "Smalltalk",
"bytes": "77031"
},
{
"name": "SystemVerilog",
"bytes": "83394"
},
{
"name": "Tcl",
"bytes": "7061959"
},
{
"name": "Vim script",
"bytes": "27705984"
},
{
"name": "kvlang",
"bytes": "60630"
}
],
"symlink_target": ""
} |
import tensorflow as tf
from argparse import ArgumentParser
from src.model import CryptoNet
from src.config import *
def build_parser():
    """Build the command-line parser for the adversarial-training script.

    Defaults come from src.config (MSG_LEN, LEARNING_RATE, NUM_EPOCHS,
    BATCH_SIZE).
    """
    # (flag, dest, type, help text, metavar, default) — one row per option.
    option_rows = [
        ('--msg-len', 'msg_len', int,
         'message length', 'MSG_LEN', MSG_LEN),
        ('--learning-rate', 'learning_rate', float,
         'learning rate (default %(default)s)', 'LEARNING_RATE', LEARNING_RATE),
        ('--epochs', 'epochs', int,
         'Number of Epochs in Adversarial Training', 'EPOCHS', NUM_EPOCHS),
        ('--batch-size', 'batch_size', int,
         'batch size', 'BATCH_SIZE', BATCH_SIZE),
    ]
    parser = ArgumentParser()
    for flag, dest, arg_type, text, meta, default in option_rows:
        parser.add_argument(flag, type=arg_type, dest=dest, help=text,
                            metavar=meta, default=default)
    return parser
def main():
    """Entry point: parse CLI options, build the CryptoNet, and train it."""
    parser = build_parser()
    options = parser.parse_args()
    # TF1-style session: the network builds its graph against this session.
    with tf.Session() as sess:
        crypto_net = CryptoNet(sess, msg_len=options.msg_len, epochs=options.epochs,
                               batch_size=options.batch_size, learning_rate=options.learning_rate)
        crypto_net.train()


if __name__ == '__main__':
    main()
| {
"content_hash": "9a900b0f149ca93a76661c04391a7dda",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 98,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.5669409124906507,
"repo_name": "ankeshanand/neural-cryptography-tensorflow",
"id": "74c93a3865dd915752c858eaa5958ec7f9499b6c",
"size": "1337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8329"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import DrivercoinTestFramework
from test_framework.util import *
def check_array_result(object_array, to_match, expected):
    """
    Scan *object_array* (a list of JSON-style dicts) for items whose fields
    equal every key/value pair in *to_match*; each matching item must also
    carry every key/value pair in *expected*.  Raises AssertionError when an
    expectation fails or when nothing matches.  (Items are expected to
    contain every key that is tested; a missing key raises KeyError.)
    """
    matches = 0
    for item in object_array:
        # Skip items that don't satisfy the selection criteria.
        if not all(item[key] == value for key, value in to_match.items()):
            continue
        # Selected item: every expectation must hold.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"
                                     % (str(item), str(key), str(value)))
        matches += 1
    if matches == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
import threading
class LongpollThread(threading.Thread):
    """Background thread issuing a blocking getblocktemplate longpoll.

    The constructor snapshots the node's current longpollid; run() then
    blocks until the server decides the template has changed.
    """

    def __init__(self, node):
        threading.Thread.__init__(self)
        # Snapshot the current longpollid so the poll blocks until it changes.
        template = node.getblocktemplate()
        self.longpollid = template['longpollid']
        # Open a dedicated connection: the same proxy object cannot be
        # used safely from two threads at once.
        self.node = AuthServiceProxy(node.url, timeout=600)

    def run(self):
        self.node.getblocktemplate({'longpollid': self.longpollid})
class GetBlockTemplateLPTest(DrivercoinTestFramework):
    '''
    Test longpolling with getblocktemplate.
    '''
    def run_test(self):
        """Exercise the four longpoll termination conditions in order."""
        print "Warning: this test will take about 70 seconds in the best case. Be patient."
        self.nodes[0].generate(10)
        templat = self.nodes[0].getblocktemplate()
        longpollid = templat['longpollid']
        # longpollid should not change between successive invocations if nothing else happens
        templat2 = self.nodes[0].getblocktemplate()
        assert(templat2['longpollid'] == longpollid)
        # Test 1: test that the longpolling wait if we do nothing
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # check that thread still lives
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(thr.is_alive())
        # Test 2: test that longpoll will terminate if another node generates a block
        self.nodes[1].generate(1)  # generate a block on another node
        # check that thread will exit now that new transaction entered mempool
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 3: test that longpoll will terminate if we generate a block ourselves
        thr = LongpollThread(self.nodes[0])
        thr.start()
        self.nodes[0].generate(1)  # generate a block on another node
        thr.join(5)  # wait 5 seconds or until thread exits
        assert(not thr.is_alive())
        # Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
        thr = LongpollThread(self.nodes[0])
        thr.start()
        # generate a random transaction and submit it
        (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
        # after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
        thr.join(60 + 20)
        assert(not thr.is_alive())
if __name__ == '__main__':
    # Allow running this functional test directly as a script.
    GetBlockTemplateLPTest().main()
| {
"content_hash": "c7dd3b8732f17ccde814d9be8f0633a9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 113,
"avg_line_length": 39.69767441860465,
"alnum_prop": 0.6417691857059168,
"repo_name": "drivercoin/drivercoin",
"id": "36668fcbca65b200ac053dba862ca6caad79b123",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/getblocktemplate_longpoll.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "516246"
},
{
"name": "C++",
"bytes": "3917943"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18658"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2103"
},
{
"name": "Makefile",
"bytes": "63590"
},
{
"name": "Objective-C",
"bytes": "2022"
},
{
"name": "Objective-C++",
"bytes": "7252"
},
{
"name": "Protocol Buffer",
"bytes": "2320"
},
{
"name": "Python",
"bytes": "492079"
},
{
"name": "QMake",
"bytes": "2023"
},
{
"name": "Shell",
"bytes": "30719"
}
],
"symlink_target": ""
} |
from anvil.components.configurators import base
class Configurator(base.Configurator):
    """Shared configurator behaviour for quantum plugin components.

    Subclasses override DB_NAME / PLUGIN_CLASS and may rewrite the
    config-file paths (see CorePluginConfigurator).
    """

    DB_NAME = "quantum"
    PLUGIN_CLASS = "quantum.plugins.UNKNOWN"

    def __init__(self, installer, configs, adjusters):
        super(Configurator, self).__init__(installer, configs)
        self.config_adjusters = adjusters

    @property
    def config_files(self):
        # Hand back a fresh list so callers cannot mutate our config set.
        return list(self.configs)

    @property
    def get_plugin_config_file_path(self):
        # No plugin-specific config file by default.
        return ""
class CorePluginConfigurator(Configurator):
    """Configurator that namespaces config paths under the selected core plugin."""

    def __init__(self, installer, configs, adjusters):
        self.core_plugin = installer.get_option("core_plugin")
        # Every config name and adjuster key lives under plugins/<core_plugin>/.
        prefix = "plugins/%s/" % self.core_plugin
        super(CorePluginConfigurator, self).__init__(
            installer,
            [prefix + name for name in configs],
            dict((prefix + key, value)
                 for key, value in adjusters.iteritems()))
| {
"content_hash": "3d801bcfa2180590e427c2012938dc04",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 29.65625,
"alnum_prop": 0.6174920969441517,
"repo_name": "toby82/anvil",
"id": "e32f74a361b7d2f2739b7db60f449d4739545ce5",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anvil/components/configurators/quantum_plugins/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from msrest.serialization import Model
class EffectiveNetworkSecurityGroup(Model):
    """Effective network security group.

    :param network_security_group: The ID of network security group that is
     applied.
    :type network_security_group:
     ~azure.mgmt.network.v2017_10_01.models.SubResource
    :param association: Associated resources.
    :type association:
     ~azure.mgmt.network.v2017_10_01.models.EffectiveNetworkSecurityGroupAssociation
    :param effective_security_rules: A collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2017_10_01.models.EffectiveNetworkSecurityRule]
    :param tag_map: Mapping of tags to list of IP Addresses included within
     the tag.
    :type tag_map: dict[str, list[str]]
    """

    # msrest (de)serialization map: attribute name -> wire key and wire type.
    # The key strings are part of the REST contract; do not rename them.
    _attribute_map = {
        'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
        'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
        'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
        'tag_map': {'key': 'tagMap', 'type': '{[str]}'},
    }

    def __init__(self, network_security_group=None, association=None, effective_security_rules=None, tag_map=None):
        """Initialize the model; all fields are optional and default to None."""
        super(EffectiveNetworkSecurityGroup, self).__init__()
        self.network_security_group = network_security_group
        self.association = association
        self.effective_security_rules = effective_security_rules
        self.tag_map = tag_map
| {
"content_hash": "2d20801ed093bd30e78d0d0be4cb3f8f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 115,
"avg_line_length": 45.794117647058826,
"alnum_prop": 0.7026332691072575,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "8cb4f05b949389c391d0bcb40b0c05faed93951c",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/effective_network_security_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import logging
from copy import deepcopy
import mock
import pytest
from . import common
log = logging.getLogger(__file__)
class _PSUtilSwapStatsMock(object):
def __init__(self, sin, sout):
self.sin = sin
self.sout = sout
# Baseline swap counters and the deltas applied on the second poll.
ORIG_SWAP_IN = float(115332743168)
ORIG_SWAP_OUT = float(22920884224)
SWAP_IN_INCR = 2
SWAP_OUT_INCR = 4
# Two successive psutil.swap_memory() readings: the baseline, then the
# baseline advanced by the increments above.
MOCK_PSUTIL_SWAP_STATS = [
    _PSUtilSwapStatsMock(ORIG_SWAP_IN, ORIG_SWAP_OUT),
    _PSUtilSwapStatsMock(ORIG_SWAP_IN + SWAP_IN_INCR, ORIG_SWAP_OUT + SWAP_OUT_INCR),
]
@pytest.fixture
def mock_psutil():
    # Patch psutil.swap_memory so consecutive calls return the canned
    # readings in MOCK_PSUTIL_SWAP_STATS (baseline, then incremented).
    with mock.patch('psutil.swap_memory', side_effect=MOCK_PSUTIL_SWAP_STATS):
        yield
def test_system_swap(check, mock_psutil, aggregator):
    """One check run reports the baseline swap counters with the instance tags."""
    check.check(deepcopy(common.INSTANCE))
    expected_tags = common.INSTANCE.get("tags")
    aggregator.assert_metric('system.swap.swapped_in', value=ORIG_SWAP_IN, count=1, tags=expected_tags)
    aggregator.assert_metric('system.swap.swapped_out', value=ORIG_SWAP_OUT, count=1, tags=expected_tags)
| {
"content_hash": "c6fdffbf10e42cf69b2d59c1764b8b2c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 119,
"avg_line_length": 24.825,
"alnum_prop": 0.7069486404833837,
"repo_name": "DataDog/integrations-core",
"id": "df1a6d922cf842f8bb60c5443c440ae2e58e22b4",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system_swap/tests/test_system_swap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import datetime
import sys
import time
import warnings
sys.path[0:0] = [""]
from bson.objectid import ObjectId
from bson.py3compat import text_type
from bson.son import SON
from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne
from pymongo.command_cursor import CommandCursor
from pymongo.errors import NotMasterError, OperationFailure
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
from test import unittest, client_context, client_knobs
from test.utils import (EventListener,
rs_or_single_client,
single_client,
wait_until)
class TestCommandMonitoring(unittest.TestCase):
    @classmethod
    @client_context.require_connection
    def setUpClass(cls):
        """Attach a capturing EventListener to a fresh client, muting globals."""
        cls.listener = EventListener()
        cls.saved_listeners = monitoring._LISTENERS
        # Don't use any global subscribers.
        monitoring._LISTENERS = monitoring._Listeners([], [], [], [])
        cls.client = rs_or_single_client(event_listeners=[cls.listener])
    @classmethod
    def tearDownClass(cls):
        """Restore the global monitoring listeners saved in setUpClass."""
        monitoring._LISTENERS = cls.saved_listeners
    def tearDown(self):
        # Drop events captured during the test so the next test starts clean.
        self.listener.results.clear()
    def test_started_simple(self):
        """A plain command publishes a CommandStartedEvent with full metadata."""
        self.client.pymongo_test.command('ismaster')
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(SON([('ismaster', 1)]), started.command)
        self.assertEqual('ismaster', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
    def test_succeeded_simple(self):
        """A plain command publishes a CommandSucceededEvent with ok=1 reply."""
        self.client.pymongo_test.command('ismaster')
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertEqual('ismaster', succeeded.command_name)
        self.assertEqual(self.client.address, succeeded.connection_id)
        self.assertEqual(1, succeeded.reply.get('ok'))
        self.assertTrue(isinstance(succeeded.request_id, int))
        self.assertTrue(isinstance(succeeded.duration_micros, int))
    def test_failed_simple(self):
        """An unknown command publishes a CommandFailedEvent and no success."""
        try:
            self.client.pymongo_test.command('oops!')
        except OperationFailure:
            pass
        results = self.listener.results
        started = results['started'][0]
        failed = results['failed'][0]
        self.assertEqual(0, len(results['succeeded']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertTrue(
            isinstance(failed, monitoring.CommandFailedEvent))
        self.assertEqual('oops!', failed.command_name)
        self.assertEqual(self.client.address, failed.connection_id)
        self.assertEqual(0, failed.failure.get('ok'))
        self.assertTrue(isinstance(failed.request_id, int))
        self.assertTrue(isinstance(failed.duration_micros, int))
    def test_find_one(self):
        """find_one publishes a 'find' command with limit=1 and singleBatch."""
        self.client.pymongo_test.test.find_one()
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(
            SON([('find', 'test'),
                 ('filter', {}),
                 ('limit', 1),
                 ('singleBatch', True)]),
            started.command)
        self.assertEqual('find', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
    def test_find_and_get_more(self):
        """A batched find publishes matching find and getMore event pairs."""
        self.client.pymongo_test.test.drop()
        self.client.pymongo_test.test.insert_many([{} for _ in range(10)])
        self.listener.results.clear()
        cursor = self.client.pymongo_test.test.find(
            projection={'_id': False},
            batch_size=4)
        # Consume exactly the first batch so only 'find' has been sent.
        for _ in range(4):
            next(cursor)
        cursor_id = cursor.cursor_id
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(
            SON([('find', 'test'),
                 ('filter', {}),
                 ('projection', {'_id': False}),
                 ('batchSize', 4)]),
            started.command)
        self.assertEqual('find', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(isinstance(succeeded.duration_micros, int))
        self.assertEqual('find', succeeded.command_name)
        self.assertTrue(isinstance(succeeded.request_id, int))
        self.assertEqual(cursor.address, succeeded.connection_id)
        csr = succeeded.reply["cursor"]
        self.assertEqual(csr["id"], cursor_id)
        self.assertEqual(csr["ns"], "pymongo_test.test")
        self.assertEqual(csr["firstBatch"], [{} for _ in range(4)])
        self.listener.results.clear()
        # Next batch. Exhausting the cursor could cause a getMore
        # that returns id of 0 and no results.
        next(cursor)
        try:
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertTrue(
                isinstance(started, monitoring.CommandStartedEvent))
            self.assertEqual(
                SON([('getMore', cursor_id),
                     ('collection', 'test'),
                     ('batchSize', 4)]),
                started.command)
            self.assertEqual('getMore', started.command_name)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertTrue(isinstance(started.request_id, int))
            self.assertTrue(
                isinstance(succeeded, monitoring.CommandSucceededEvent))
            self.assertTrue(isinstance(succeeded.duration_micros, int))
            self.assertEqual('getMore', succeeded.command_name)
            self.assertTrue(isinstance(succeeded.request_id, int))
            self.assertEqual(cursor.address, succeeded.connection_id)
            csr = succeeded.reply["cursor"]
            self.assertEqual(csr["id"], cursor_id)
            self.assertEqual(csr["ns"], "pymongo_test.test")
            self.assertEqual(csr["nextBatch"], [{} for _ in range(4)])
        finally:
            # Exhaust the cursor to avoid kill cursors.
            tuple(cursor)
    def test_find_with_explain(self):
        """explain() publishes the unwrapped 'explain' command and its reply."""
        cmd = SON([('explain', SON([('find', 'test'),
                                    ('filter', {})]))])
        self.client.pymongo_test.test.drop()
        self.client.pymongo_test.test.insert_one({})
        self.listener.results.clear()
        coll = self.client.pymongo_test.test
        # Test that we publish the unwrapped command.
        if self.client.is_mongos and client_context.version.at_least(2, 4, 0):
            coll = coll.with_options(
                read_preference=ReadPreference.PRIMARY_PREFERRED)
        res = coll.find().explain()
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(cmd, started.command)
        self.assertEqual('explain', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(isinstance(succeeded.duration_micros, int))
        self.assertEqual('explain', succeeded.command_name)
        self.assertTrue(isinstance(succeeded.request_id, int))
        self.assertEqual(self.client.address, succeeded.connection_id)
        self.assertEqual(res, succeeded.reply)
    def test_find_options(self):
        """All find() options appear as fields of the published 'find' command."""
        cmd = SON([('find', 'test'),
                   ('filter', {}),
                   ('comment', 'this is a test'),
                   ('sort', SON([('_id', 1)])),
                   ('projection', {'x': False}),
                   ('skip', 1),
                   ('batchSize', 2),
                   ('noCursorTimeout', True),
                   ('allowPartialResults', True)])
        self.client.pymongo_test.test.drop()
        self.client.pymongo_test.test.insert_many([{'x': i} for i in range(5)])
        self.listener.results.clear()
        coll = self.client.pymongo_test.test
        # Test that we publish the unwrapped command.
        if self.client.is_mongos and client_context.version.at_least(2, 4, 0):
            coll = coll.with_options(
                read_preference=ReadPreference.PRIMARY_PREFERRED)
        cursor = coll.find(
            filter={},
            projection={'x': False},
            skip=1,
            no_cursor_timeout=True,
            sort=[('_id', 1)],
            allow_partial_results=True,
            modifiers=SON([('$comment', 'this is a test')]),
            batch_size=2)
        next(cursor)
        try:
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertTrue(
                isinstance(started, monitoring.CommandStartedEvent))
            self.assertEqual(cmd, started.command)
            self.assertEqual('find', started.command_name)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertTrue(isinstance(started.request_id, int))
            self.assertTrue(
                isinstance(succeeded, monitoring.CommandSucceededEvent))
            self.assertTrue(isinstance(succeeded.duration_micros, int))
            self.assertEqual('find', succeeded.command_name)
            self.assertTrue(isinstance(succeeded.request_id, int))
            self.assertEqual(self.client.address, succeeded.connection_id)
        finally:
            # Exhaust the cursor to avoid kill cursors.
            tuple(cursor)
    @client_context.require_version_min(2, 6, 0)
    def test_command_and_get_more(self):
        """aggregate publishes 'aggregate' then 'getMore' events with batches."""
        self.client.pymongo_test.test.drop()
        self.client.pymongo_test.test.insert_many(
            [{'x': 1} for _ in range(10)])
        self.listener.results.clear()
        coll = self.client.pymongo_test.test
        # Test that we publish the unwrapped command.
        if self.client.is_mongos and client_context.version.at_least(2, 4, 0):
            coll = coll.with_options(
                read_preference=ReadPreference.PRIMARY_PREFERRED)
        cursor = coll.aggregate(
            [{'$project': {'_id': False, 'x': 1}}], batchSize=4)
        # Consume exactly the first batch so only 'aggregate' has run.
        for _ in range(4):
            next(cursor)
        cursor_id = cursor.cursor_id
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(
            SON([('aggregate', 'test'),
                 ('pipeline', [{'$project': {'_id': False, 'x': 1}}]),
                 ('cursor', {'batchSize': 4})]),
            started.command)
        self.assertEqual('aggregate', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
        self.assertTrue(
            isinstance(succeeded, monitoring.CommandSucceededEvent))
        self.assertTrue(isinstance(succeeded.duration_micros, int))
        self.assertEqual('aggregate', succeeded.command_name)
        self.assertTrue(isinstance(succeeded.request_id, int))
        self.assertEqual(cursor.address, succeeded.connection_id)
        expected_cursor = {'id': cursor_id,
                           'ns': 'pymongo_test.test',
                           'firstBatch': [{'x': 1} for _ in range(4)]}
        self.assertEqual(expected_cursor, succeeded.reply.get('cursor'))
        self.listener.results.clear()
        next(cursor)
        try:
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertTrue(
                isinstance(started, monitoring.CommandStartedEvent))
            self.assertEqual(
                SON([('getMore', cursor_id),
                     ('collection', 'test'),
                     ('batchSize', 4)]),
                started.command)
            self.assertEqual('getMore', started.command_name)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertTrue(isinstance(started.request_id, int))
            self.assertTrue(
                isinstance(succeeded, monitoring.CommandSucceededEvent))
            self.assertTrue(isinstance(succeeded.duration_micros, int))
            self.assertEqual('getMore', succeeded.command_name)
            self.assertTrue(isinstance(succeeded.request_id, int))
            self.assertEqual(cursor.address, succeeded.connection_id)
            expected_result = {
                'cursor': {'id': cursor_id,
                           'ns': 'pymongo_test.test',
                           'nextBatch': [{'x': 1} for _ in range(4)]},
                'ok': 1}
            self.assertEqual(expected_result, succeeded.reply)
        finally:
            # Exhaust the cursor to avoid kill cursors.
            tuple(cursor)
    def test_get_more_failure(self):
        """A getMore on a bogus cursor id publishes a CommandFailedEvent."""
        address = self.client.address
        coll = self.client.pymongo_test.test
        # Fabricate a cursor document with an id the server doesn't know.
        cursor_doc = {"id": 12345, "firstBatch": [], "ns": coll.full_name}
        cursor = CommandCursor(coll, cursor_doc, address)
        try:
            next(cursor)
        except Exception:
            pass
        results = self.listener.results
        started = results['started'][0]
        self.assertEqual(0, len(results['succeeded']))
        failed = results['failed'][0]
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertEqual(
            SON([('getMore', 12345),
                 ('collection', 'test')]),
            started.command)
        self.assertEqual('getMore', started.command_name)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertTrue(isinstance(started.request_id, int))
        self.assertTrue(
            isinstance(failed, monitoring.CommandFailedEvent))
        self.assertTrue(isinstance(failed.duration_micros, int))
        self.assertEqual('getMore', failed.command_name)
        self.assertTrue(isinstance(failed.request_id, int))
        self.assertEqual(cursor.address, failed.connection_id)
        self.assertEqual(0, failed.failure.get("ok"))
    @client_context.require_replica_set
    @client_context.require_secondaries_count(1)
    def test_not_master_error(self):
        """A write sent to a secondary publishes a failure carrying the error."""
        address = next(iter(client_context.client.secondaries))
        client = single_client(*address, event_listeners=[self.listener])
        # Clear authentication command results from the listener.
        client.admin.command('ismaster')
        self.listener.results.clear()
        error = None
        try:
            client.pymongo_test.test.find_one_and_delete({})
        except NotMasterError as exc:
            error = exc.errors
        results = self.listener.results
        started = results['started'][0]
        failed = results['failed'][0]
        self.assertEqual(0, len(results['succeeded']))
        self.assertTrue(
            isinstance(started, monitoring.CommandStartedEvent))
        self.assertTrue(
            isinstance(failed, monitoring.CommandFailedEvent))
        self.assertEqual('findAndModify', failed.command_name)
        self.assertEqual(address, failed.connection_id)
        self.assertEqual(0, failed.failure.get('ok'))
        self.assertTrue(isinstance(failed.request_id, int))
        self.assertTrue(isinstance(failed.duration_micros, int))
        self.assertEqual(error, failed.failure)
@client_context.require_no_mongos
def test_exhaust(self):
self.client.pymongo_test.test.drop()
self.client.pymongo_test.test.insert_many([{} for _ in range(10)])
self.listener.results.clear()
cursor = self.client.pymongo_test.test.find(
projection={'_id': False},
batch_size=5,
cursor_type=CursorType.EXHAUST)
next(cursor)
cursor_id = cursor.cursor_id
results = self.listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertTrue(
isinstance(started, monitoring.CommandStartedEvent))
self.assertEqual(
SON([('find', 'test'),
('filter', {}),
('projection', {'_id': False}),
('batchSize', 5)]),
started.command)
self.assertEqual('find', started.command_name)
self.assertEqual(cursor.address, started.connection_id)
self.assertEqual('pymongo_test', started.database_name)
self.assertTrue(isinstance(started.request_id, int))
self.assertTrue(
isinstance(succeeded, monitoring.CommandSucceededEvent))
self.assertTrue(isinstance(succeeded.duration_micros, int))
self.assertEqual('find', succeeded.command_name)
self.assertTrue(isinstance(succeeded.request_id, int))
self.assertEqual(cursor.address, succeeded.connection_id)
expected_result = {
'cursor': {'id': cursor_id,
'ns': 'pymongo_test.test',
'firstBatch': [{} for _ in range(5)]},
'ok': 1}
self.assertEqual(expected_result, succeeded.reply)
self.listener.results.clear()
tuple(cursor)
results = self.listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertTrue(
isinstance(started, monitoring.CommandStartedEvent))
self.assertEqual(
SON([('getMore', cursor_id),
('collection', 'test'),
('batchSize', 5)]),
started.command)
self.assertEqual('getMore', started.command_name)
self.assertEqual(cursor.address, started.connection_id)
self.assertEqual('pymongo_test', started.database_name)
self.assertTrue(isinstance(started.request_id, int))
self.assertTrue(
isinstance(succeeded, monitoring.CommandSucceededEvent))
self.assertTrue(isinstance(succeeded.duration_micros, int))
self.assertEqual('getMore', succeeded.command_name)
self.assertTrue(isinstance(succeeded.request_id, int))
self.assertEqual(cursor.address, succeeded.connection_id)
expected_result = {
'cursor': {'id': 0,
'ns': 'pymongo_test.test',
'nextBatch': [{} for _ in range(5)]},
'ok': 1}
self.assertEqual(expected_result, succeeded.reply)
def test_kill_cursors(self):
with client_knobs(kill_cursor_frequency=0.01):
self.client.pymongo_test.test.drop()
self.client.pymongo_test.test.insert_many([{} for _ in range(10)])
cursor = self.client.pymongo_test.test.find().batch_size(5)
next(cursor)
cursor_id = cursor.cursor_id
self.listener.results.clear()
cursor.close()
time.sleep(2)
results = self.listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertTrue(
isinstance(started, monitoring.CommandStartedEvent))
# There could be more than one cursor_id here depending on
# when the thread last ran.
self.assertIn(cursor_id, started.command['cursors'])
self.assertEqual('killCursors', started.command_name)
self.assertIs(type(started.connection_id), tuple)
self.assertEqual(cursor.address, started.connection_id)
self.assertEqual('pymongo_test', started.database_name)
self.assertTrue(isinstance(started.request_id, int))
self.assertTrue(
isinstance(succeeded, monitoring.CommandSucceededEvent))
self.assertTrue(isinstance(succeeded.duration_micros, int))
self.assertEqual('killCursors', succeeded.command_name)
self.assertTrue(isinstance(succeeded.request_id, int))
self.assertIs(type(succeeded.connection_id), tuple)
self.assertEqual(cursor.address, succeeded.connection_id)
# There could be more than one cursor_id here depending on
# when the thread last ran.
self.assertTrue(cursor_id in succeeded.reply['cursorsUnknown']
or cursor_id in succeeded.reply['cursorsKilled'])
    def test_non_bulk_writes(self):
        """Each single-document write helper (insert_one, delete_many,
        replace_one, update_one, update_many, delete_one) publishes exactly
        one started/succeeded command event pair with the expected write
        command, and server-side write errors appear in the succeeded
        event's reply rather than as failed events."""
        coll = self.client.pymongo_test.test
        coll.drop()
        self.listener.results.clear()

        # Implied write concern insert_one
        res = coll.insert_one({'x': 1})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('insert', coll.name),
                        ('ordered', True),
                        ('documents', [{'_id': res.inserted_id, 'x': 1}])])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('insert', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))

        # Unacknowledged insert_one
        self.listener.results.clear()
        coll = coll.with_options(write_concern=WriteConcern(w=0))
        res = coll.insert_one({'x': 1})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('insert', coll.name),
                        ('ordered', True),
                        ('documents', [{'_id': res.inserted_id, 'x': 1}]),
                        ('writeConcern', {'w': 0})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('insert', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        # With w=0 the driver fabricates a bare {'ok': 1} reply.
        self.assertEqual(succeeded.reply, {'ok': 1})

        # Explicit write concern insert_one
        self.listener.results.clear()
        coll = coll.with_options(write_concern=WriteConcern(w=1))
        res = coll.insert_one({'x': 1})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('insert', coll.name),
                        ('ordered', True),
                        ('documents', [{'_id': res.inserted_id, 'x': 1}]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('insert', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))

        # delete_many
        self.listener.results.clear()
        res = coll.delete_many({'x': 1})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('delete', coll.name),
                        ('ordered', True),
                        ('deletes', [SON([('q', {'x': 1}),
                                          ('limit', 0)])]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('delete', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(res.deleted_count, reply.get('n'))

        # replace_one
        self.listener.results.clear()
        oid = ObjectId()
        res = coll.replace_one({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True)
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('update', coll.name),
                        ('ordered', True),
                        ('updates', [SON([('q', {'_id': oid}),
                                          ('u', {'_id': oid, 'x': 1}),
                                          ('multi', False),
                                          ('upsert', True)])]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('update', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))
        self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted'))

        # update_one
        # NOTE(review): 'res' is unused in this and the following two
        # sections; the assertions inspect the reply document instead.
        self.listener.results.clear()
        res = coll.update_one({'x': 1}, {'$inc': {'x': 1}})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('update', coll.name),
                        ('ordered', True),
                        ('updates', [SON([('q', {'x': 1}),
                                          ('u', {'$inc': {'x': 1}}),
                                          ('multi', False),
                                          ('upsert', False)])]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('update', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))

        # update_many
        self.listener.results.clear()
        res = coll.update_many({'x': 2}, {'$inc': {'x': 1}})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('update', coll.name),
                        ('ordered', True),
                        ('updates', [SON([('q', {'x': 2}),
                                          ('u', {'$inc': {'x': 1}}),
                                          ('multi', True),
                                          ('upsert', False)])]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('update', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))

        # delete_one
        self.listener.results.clear()
        res = coll.delete_one({'x': 3})
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('delete', coll.name),
                        ('ordered', True),
                        ('deletes', [SON([('q', {'x': 3}),
                                          ('limit', 1)])]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('delete', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(1, reply.get('n'))

        # All documents should have been removed by the writes above.
        self.assertEqual(0, coll.count())

        # write errors
        coll.insert_one({'_id': 1})
        try:
            self.listener.results.clear()
            coll.insert_one({'_id': 1})
        except OperationFailure:
            pass
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('insert', coll.name),
                        ('ordered', True),
                        ('documents', [{'_id': 1}]),
                        ('writeConcern', {'w': 1})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('insert', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)
        # A duplicate-key write error is a *succeeded* command; the error
        # details live in the reply's writeErrors array.
        reply = succeeded.reply
        self.assertEqual(1, reply.get('ok'))
        self.assertEqual(0, reply.get('n'))
        errors = reply.get('writeErrors')
        self.assertIsInstance(errors, list)
        error = errors[0]
        self.assertEqual(0, error.get('index'))
        self.assertIsInstance(error.get('code'), int)
        self.assertIsInstance(error.get('errmsg'), text_type)
    def test_legacy_writes(self):
        """The deprecated write helpers (insert, remove, update) publish
        the same started/succeeded command event pairs as the modern API,
        including the writeConcern entry when one is given explicitly."""
        with warnings.catch_warnings():
            # The legacy helpers emit DeprecationWarning; silence it so the
            # test output stays clean.
            warnings.simplefilter("ignore", DeprecationWarning)
            coll = self.client.pymongo_test.test
            coll.drop()
            self.listener.results.clear()

            # Implied write concern insert
            _id = coll.insert({'x': 1})
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('insert', coll.name),
                            ('ordered', True),
                            ('documents', [{'_id': _id, 'x': 1}])])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('insert', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))

            # Unacknowledged insert
            self.listener.results.clear()
            _id = coll.insert({'x': 1}, w=0)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('insert', coll.name),
                            ('ordered', True),
                            ('documents', [{'_id': _id, 'x': 1}]),
                            ('writeConcern', {'w': 0})])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('insert', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            # With w=0 the driver fabricates a bare {'ok': 1} reply.
            self.assertEqual(succeeded.reply, {'ok': 1})

            # Explicit write concern insert
            self.listener.results.clear()
            _id = coll.insert({'x': 1}, w=1)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('insert', coll.name),
                            ('ordered', True),
                            ('documents', [{'_id': _id, 'x': 1}]),
                            ('writeConcern', {'w': 1})])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('insert', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))

            # remove all
            self.listener.results.clear()
            res = coll.remove({'x': 1}, w=1)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('delete', coll.name),
                            ('ordered', True),
                            ('deletes', [SON([('q', {'x': 1}),
                                              ('limit', 0)])]),
                            ('writeConcern', {'w': 1})])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('delete', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            # Legacy remove() returns the getLastError-style document.
            self.assertEqual(res['n'], reply.get('n'))

            # upsert
            self.listener.results.clear()
            oid = ObjectId()
            coll.update({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True, w=1)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('update', coll.name),
                            ('ordered', True),
                            ('updates', [SON([('q', {'_id': oid}),
                                              ('u', {'_id': oid, 'x': 1}),
                                              ('multi', False),
                                              ('upsert', True)])]),
                            ('writeConcern', {'w': 1})])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('update', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))
            self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted'))

            # update one
            self.listener.results.clear()
            coll.update({'x': 1}, {'$inc': {'x': 1}})
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('update', coll.name),
                            ('ordered', True),
                            ('updates', [SON([('q', {'x': 1}),
                                              ('u', {'$inc': {'x': 1}}),
                                              ('multi', False),
                                              ('upsert', False)])])])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('update', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))

            # update many
            self.listener.results.clear()
            coll.update({'x': 2}, {'$inc': {'x': 1}}, multi=True)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('update', coll.name),
                            ('ordered', True),
                            ('updates', [SON([('q', {'x': 2}),
                                              ('u', {'$inc': {'x': 1}}),
                                              ('multi', True),
                                              ('upsert', False)])])])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('update', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))

            # remove one
            self.listener.results.clear()
            coll.remove({'x': 3}, multi=False)
            results = self.listener.results
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = SON([('delete', coll.name),
                            ('ordered', True),
                            ('deletes', [SON([('q', {'x': 3}),
                                              ('limit', 1)])])])
            self.assertEqual(expected, started.command)
            self.assertEqual('pymongo_test', started.database_name)
            self.assertEqual('delete', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
            reply = succeeded.reply
            self.assertEqual(1, reply.get('ok'))
            self.assertEqual(1, reply.get('n'))

            # All documents should have been removed by the writes above.
            self.assertEqual(0, coll.count())
def test_insert_many(self):
# This always uses the bulk API.
coll = self.client.pymongo_test.test
coll.drop()
self.listener.results.clear()
big = 'x' * (1024 * 1024 * 4)
docs = [{'_id': i, 'big': big} for i in range(6)]
coll.insert_many(docs)
results = self.listener.results
started = results['started']
succeeded = results['succeeded']
self.assertEqual(0, len(results['failed']))
documents = []
count = 0
operation_id = started[0].operation_id
self.assertIsInstance(operation_id, int)
for start, succeed in zip(started, succeeded):
self.assertIsInstance(start, monitoring.CommandStartedEvent)
cmd = start.command
self.assertEqual(['insert', 'ordered', 'documents'],
list(cmd.keys()))
self.assertEqual(coll.name, cmd['insert'])
self.assertIs(True, cmd['ordered'])
documents.extend(cmd['documents'])
self.assertEqual('pymongo_test', start.database_name)
self.assertEqual('insert', start.command_name)
self.assertIsInstance(start.request_id, int)
self.assertEqual(self.client.address, start.connection_id)
self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
self.assertIsInstance(succeed.duration_micros, int)
self.assertEqual(start.command_name, succeed.command_name)
self.assertEqual(start.request_id, succeed.request_id)
self.assertEqual(start.connection_id, succeed.connection_id)
self.assertEqual(start.operation_id, operation_id)
self.assertEqual(succeed.operation_id, operation_id)
reply = succeed.reply
self.assertEqual(1, reply.get('ok'))
count += reply.get('n', 0)
self.assertEqual(documents, docs)
self.assertEqual(6, count)
def test_legacy_insert_many(self):
# On legacy servers this uses bulk OP_INSERT.
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
coll = self.client.pymongo_test.test
coll.drop()
self.listener.results.clear()
# Force two batches on legacy servers.
big = 'x' * (1024 * 1024 * 12)
docs = [{'_id': i, 'big': big} for i in range(6)]
coll.insert(docs)
results = self.listener.results
started = results['started']
succeeded = results['succeeded']
self.assertEqual(0, len(results['failed']))
documents = []
count = 0
operation_id = started[0].operation_id
self.assertIsInstance(operation_id, int)
for start, succeed in zip(started, succeeded):
self.assertIsInstance(start, monitoring.CommandStartedEvent)
cmd = start.command
self.assertEqual(['insert', 'ordered', 'documents'],
list(cmd.keys()))
self.assertEqual(coll.name, cmd['insert'])
self.assertIs(True, cmd['ordered'])
documents.extend(cmd['documents'])
self.assertEqual('pymongo_test', start.database_name)
self.assertEqual('insert', start.command_name)
self.assertIsInstance(start.request_id, int)
self.assertEqual(self.client.address, start.connection_id)
self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
self.assertIsInstance(succeed.duration_micros, int)
self.assertEqual(start.command_name, succeed.command_name)
self.assertEqual(start.request_id, succeed.request_id)
self.assertEqual(start.connection_id, succeed.connection_id)
self.assertEqual(start.operation_id, operation_id)
self.assertEqual(succeed.operation_id, operation_id)
reply = succeed.reply
self.assertEqual(1, reply.get('ok'))
count += reply.get('n', 0)
self.assertEqual(documents, docs)
self.assertEqual(6, count)
def test_bulk_write(self):
coll = self.client.pymongo_test.test
coll.drop()
self.listener.results.clear()
coll.bulk_write([InsertOne({'_id': 1}),
UpdateOne({'_id': 1}, {'$set': {'x': 1}}),
DeleteOne({'_id': 1})])
results = self.listener.results
started = results['started']
succeeded = results['succeeded']
self.assertEqual(0, len(results['failed']))
operation_id = started[0].operation_id
pairs = list(zip(started, succeeded))
self.assertEqual(3, len(pairs))
for start, succeed in pairs:
self.assertIsInstance(start, monitoring.CommandStartedEvent)
self.assertEqual('pymongo_test', start.database_name)
self.assertIsInstance(start.request_id, int)
self.assertEqual(self.client.address, start.connection_id)
self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
self.assertIsInstance(succeed.duration_micros, int)
self.assertEqual(start.command_name, succeed.command_name)
self.assertEqual(start.request_id, succeed.request_id)
self.assertEqual(start.connection_id, succeed.connection_id)
self.assertEqual(start.operation_id, operation_id)
self.assertEqual(succeed.operation_id, operation_id)
expected = SON([('insert', coll.name),
('ordered', True),
('documents', [{'_id': 1}])])
self.assertEqual(expected, started[0].command)
expected = SON([('update', coll.name),
('ordered', True),
('updates', [SON([('q', {'_id': 1}),
('u', {'$set': {'x': 1}}),
('multi', False),
('upsert', False)])])])
self.assertEqual(expected, started[1].command)
expected = SON([('delete', coll.name),
('ordered', True),
('deletes', [SON([('q', {'_id': 1}),
('limit', 1)])])])
self.assertEqual(expected, started[2].command)
def test_write_errors(self):
coll = self.client.pymongo_test.test
coll.drop()
self.listener.results.clear()
try:
coll.bulk_write([InsertOne({'_id': 1}),
InsertOne({'_id': 1}),
InsertOne({'_id': 1}),
DeleteOne({'_id': 1})],
ordered=False)
except OperationFailure:
pass
results = self.listener.results
started = results['started']
succeeded = results['succeeded']
self.assertEqual(0, len(results['failed']))
operation_id = started[0].operation_id
pairs = list(zip(started, succeeded))
errors = []
for start, succeed in pairs:
self.assertIsInstance(start, monitoring.CommandStartedEvent)
self.assertEqual('pymongo_test', start.database_name)
self.assertIsInstance(start.request_id, int)
self.assertEqual(self.client.address, start.connection_id)
self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
self.assertIsInstance(succeed.duration_micros, int)
self.assertEqual(start.command_name, succeed.command_name)
self.assertEqual(start.request_id, succeed.request_id)
self.assertEqual(start.connection_id, succeed.connection_id)
self.assertEqual(start.operation_id, operation_id)
self.assertEqual(succeed.operation_id, operation_id)
if 'writeErrors' in succeed.reply:
errors.extend(succeed.reply['writeErrors'])
self.assertEqual(2, len(errors))
fields = set(['index', 'code', 'errmsg'])
for error in errors:
self.assertEqual(fields, set(error))
    def test_first_batch_helper(self):
        """Helpers that return a command's first batch (collection_names,
        list_indexes, current_op, and fsyncUnlock on non-mongos) publish
        the expected started/succeeded event pairs."""
        # Regardless of server version and use of helpers._first_batch
        # this test should still pass.
        self.listener.results.clear()
        self.client.pymongo_test.collection_names()
        results = self.listener.results
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('listCollections', 1), ('cursor', {})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('listCollections', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)

        self.listener.results.clear()
        tuple(self.client.pymongo_test.test.list_indexes())
        # NOTE: 'results' still aliases the listener's dict; clear()
        # empties it in place, so the lookups below see only the events
        # from list_indexes().
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('listIndexes', 'test'), ('cursor', {})])
        self.assertEqual(expected, started.command)
        self.assertEqual('pymongo_test', started.database_name)
        self.assertEqual('listIndexes', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)

        self.listener.results.clear()
        self.client.pymongo_test.current_op(True)
        started = results['started'][0]
        succeeded = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertIsInstance(started, monitoring.CommandStartedEvent)
        expected = SON([('currentOp', 1), ('$all', True)])
        self.assertEqual(expected, started.command)
        # currentOp always runs against the admin database.
        self.assertEqual('admin', started.database_name)
        self.assertEqual('currentOp', started.command_name)
        self.assertIsInstance(started.request_id, int)
        self.assertEqual(self.client.address, started.connection_id)
        self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
        self.assertIsInstance(succeeded.duration_micros, int)
        self.assertEqual(started.command_name, succeeded.command_name)
        self.assertEqual(started.request_id, succeeded.request_id)
        self.assertEqual(started.connection_id, succeeded.connection_id)

        # fsync/unlock is not supported through mongos.
        if not client_context.is_mongos:
            self.client.fsync(lock=True)
            self.listener.results.clear()
            self.client.unlock()
            # Wait for async unlock...
            wait_until(
                lambda: not self.client.is_locked, "unlock the database")
            started = results['started'][0]
            succeeded = results['succeeded'][0]
            self.assertEqual(0, len(results['failed']))
            self.assertIsInstance(started, monitoring.CommandStartedEvent)
            expected = {'fsyncUnlock': 1}
            self.assertEqual(expected, started.command)
            self.assertEqual('admin', started.database_name)
            self.assertEqual('fsyncUnlock', started.command_name)
            self.assertIsInstance(started.request_id, int)
            self.assertEqual(self.client.address, started.connection_id)
            self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
            self.assertIsInstance(succeeded.duration_micros, int)
            self.assertEqual(started.command_name, succeeded.command_name)
            self.assertEqual(started.request_id, succeeded.request_id)
            self.assertEqual(started.connection_id, succeeded.connection_id)
def test_sensitive_commands(self):
listeners = self.client._event_listeners
self.listener.results.clear()
cmd = SON([("getnonce", 1)])
listeners.publish_command_start(
cmd, "pymongo_test", 12345, self.client.address)
delta = datetime.timedelta(milliseconds=100)
listeners.publish_command_success(
delta, {'nonce': 'e474f4561c5eb40b', 'ok': 1.0},
"getnonce", 12345, self.client.address)
results = self.listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertIsInstance(started, monitoring.CommandStartedEvent)
self.assertEqual({}, started.command)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('getnonce', started.command_name)
self.assertIsInstance(started.request_id, int)
self.assertEqual(self.client.address, started.connection_id)
self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent)
self.assertEqual(succeeded.duration_micros, 100000)
self.assertEqual(started.command_name, succeeded.command_name)
self.assertEqual(started.request_id, succeeded.request_id)
self.assertEqual(started.connection_id, succeeded.connection_id)
self.assertEqual({}, succeeded.reply)
class TestGlobalListener(unittest.TestCase):
    """A listener registered via monitoring.register receives all events."""

    @classmethod
    @client_context.require_connection
    def setUpClass(cls):
        cls.listener = EventListener()
        # Remember the global registry so tearDownClass can restore it.
        cls.saved_listeners = monitoring._LISTENERS
        monitoring.register(cls.listener)
        cls.client = single_client()
        # Get one (authenticated) socket in the pool.
        cls.client.pymongo_test.command('ismaster')

    @classmethod
    def tearDownClass(cls):
        monitoring._LISTENERS = cls.saved_listeners

    def setUp(self):
        self.listener.results.clear()

    def test_simple(self):
        self.client.pymongo_test.command('ismaster')
        results = self.listener.results
        start_event = results['started'][0]
        success_event = results['succeeded'][0]
        self.assertEqual(0, len(results['failed']))
        self.assertTrue(
            isinstance(success_event, monitoring.CommandSucceededEvent))
        self.assertTrue(
            isinstance(start_event, monitoring.CommandStartedEvent))
        self.assertEqual(SON([('ismaster', 1)]), start_event.command)
        self.assertEqual('ismaster', start_event.command_name)
        self.assertEqual(self.client.address, start_event.connection_id)
        self.assertEqual('pymongo_test', start_event.database_name)
        self.assertTrue(isinstance(start_event.request_id, int))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "3288db97b12f3e340a50878b595d8997",
"timestamp": "",
"source": "github",
"line_count": 1312,
"max_line_length": 80,
"avg_line_length": 48.958079268292686,
"alnum_prop": 0.5982438933258606,
"repo_name": "aherlihy/mongo-python-driver",
"id": "ad3803168dd1d0922c79fec2549729c6c701a3e0",
"size": "64807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_monitoring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "202602"
},
{
"name": "Python",
"bytes": "1846556"
},
{
"name": "Shell",
"bytes": "7279"
}
],
"symlink_target": ""
} |
import zeit.cms.testing
import zeit.wysiwyg.testing
def test_suite():
    """Bundle the zeit.wysiwyg doctest files into one functional suite."""
    doctest_files = ('html.txt', 'reference.txt')
    return zeit.cms.testing.FunctionalDocFileSuite(
        *doctest_files,
        package='zeit.wysiwyg',
        layer=zeit.wysiwyg.testing.WYSIWYGLayer)
| {
"content_hash": "9c5d5f9e2c3ab5d7700e5a25c62b55d7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "ZeitOnline/zeit.wysiwyg",
"id": "e456c03393ab3675cc332ebd88e0af16924a4daf",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/wysiwyg/tests/test_doctests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3837"
},
{
"name": "JavaScript",
"bytes": "26134"
},
{
"name": "Python",
"bytes": "46555"
}
],
"symlink_target": ""
} |
import os, sys
# to read dependencies from the ./lib directory bundled with the Lambda
script_dir = os.path.dirname( os.path.realpath(__file__) )
sys.path.insert(0, script_dir + os.sep + "lib")

import logging, boto3, json, random

# setup log level to DEBUG
log = logging.getLogger()
log.setLevel(logging.DEBUG)

# initialize DynamoDB client; the table name comes from the environment
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMO_TABLE'])
def handler(event, context):
    """Lambda entry point: return every item from the table's 'Masked' index.

    DynamoDB ``scan`` returns at most 1 MB of data per call, so we follow
    ``LastEvaluatedKey`` across pages instead of silently truncating the
    result to the first page (the previous behavior).
    """
    items = []
    scan_kwargs = {'IndexName': 'Masked'}
    while True:
        page = table.scan(**scan_kwargs)
        items.extend(page['Items'])
        last_key = page.get('LastEvaluatedKey')
        if last_key is None:
            return items
        # Resume the scan where the previous page stopped.
        scan_kwargs['ExclusiveStartKey'] = last_key
def save(player):
    # Persist one player record (a plain dict) into the DynamoDB table.
    table.put_item(Item=player)
def response(body, event, code=200):
    """Wrap *body* for API Gateway when *event* looks like a proxy event.

    If the event carries both 'resource' and 'httpMethod' keys the body is
    JSON-serialized into a proxy-integration response dict; otherwise the
    body is returned unchanged (direct invocation).
    """
    is_proxy_event = 'resource' in event and 'httpMethod' in event
    if not is_proxy_event:
        return body
    payload = json.dumps(body, indent=4, separators=(',', ':'))
    return {'statusCode': code, 'headers': {}, 'body': payload}
"content_hash": "07862bb7263201d6e22d6454739a5407",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 25.032258064516128,
"alnum_prop": 0.6752577319587629,
"repo_name": "akranga/mafia-serverless",
"id": "4ba78130bc897d371227dccb24dd695d3c3ab30d",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Python",
"bytes": "16722"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from tffm import TFFMClassifier
from scipy import sparse as sp
import tensorflow as tf
import pickle
class TestFM(unittest.TestCase):
    """Compare TFFMClassifier's decision function to a brute-force model."""

    def setUp(self):
        # Reproducibility.
        np.random.seed(0)
        num_rows = 20
        num_cols = 10
        self.X = np.random.randn(num_rows, num_cols)
        self.y = np.random.binomial(1, 0.5, size=num_rows)

    def decision_function_order_4(self, input_type, use_diag=False):
        # Explanation for init_std=1.0.
        # With small init_std the contribution of higher order terms is
        # neglectable, so we would essentially test only the low-order
        # implementation.  That's why a relatively high init_std=1.0 here.
        model = TFFMClassifier(
            order=4,
            rank=10,
            optimizer=tf.train.AdamOptimizer(learning_rate=0.1),
            n_epochs=0,
            input_type=input_type,
            init_std=1.0,
            seed=0,
            use_diag=use_diag
        )
        X = self.X if input_type == 'dense' else sp.csr_matrix(self.X)
        model.fit(X, self.y)
        intercept = model.intercept
        weights = model.weights
        desired = self.bruteforce_inference(self.X, weights, intercept,
                                            use_diag=use_diag)
        actual = model.decision_function(X)
        model.destroy()
        np.testing.assert_almost_equal(actual, desired, decimal=4)

    def test_dense_FM(self):
        self.decision_function_order_4(input_type='dense', use_diag=False)

    def test_dense_PN(self):
        self.decision_function_order_4(input_type='dense', use_diag=True)

    def test_sparse_FM(self):
        self.decision_function_order_4(input_type='sparse', use_diag=False)

    def test_sparse_PN(self):
        self.decision_function_order_4(input_type='sparse', use_diag=True)

    @staticmethod
    def _index_tuples(n_feat, order, use_diag):
        """Yield feature-index tuples for one interaction order.

        With use_diag every index ranges independently over all features
        (repeats allowed); otherwise the indices are strictly increasing.
        Iteration order matches the original nested loops (lexicographic).
        """
        def walk(prefix, start):
            if len(prefix) == order:
                yield tuple(prefix)
                return
            for i in range(start, n_feat):
                for combo in walk(prefix + [i], 0 if use_diag else i + 1):
                    yield combo
        for combo in walk([], 0):
            yield combo

    def bruteforce_inference_one_interaction(self, X, w, order, use_diag):
        """Naively compute the contribution of a single interaction order."""
        # Only orders 2..4 are supported, like the original branch chain.
        assert 2 <= order <= 4
        n_obj, n_feat = X.shape
        factors = w[order - 1]
        # The diagonal (PN) variant double-counts permutations, hence the
        # 2^(order-1) normalization; the FM variant needs none.
        scale = 2.0 ** (order - 1) if use_diag else 1.0
        ans = np.zeros(n_obj)
        for combo in self._index_tuples(n_feat, order, use_diag):
            x_prod = np.ones(n_obj)
            w_prod = np.ones(factors.shape[1])
            for i in combo:
                x_prod = x_prod * X[:, i]
                w_prod = w_prod * factors[i, :]
            ans += x_prod * np.sum(w_prod) / scale
        return ans

    def bruteforce_inference(self, X, w, b, use_diag):
        """Reference decision function: linear part plus all interactions."""
        assert len(w) <= 4
        ans = X.dot(w[0]).flatten() + b
        for order in range(2, len(w) + 1):
            ans += self.bruteforce_inference_one_interaction(
                X, w, order, use_diag)
        return ans
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "7599d085a82922b7f5562661cf2a3d2d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 96,
"avg_line_length": 35.95454545454545,
"alnum_prop": 0.5152970922882427,
"repo_name": "geffy/tffm",
"id": "4890a32f40a59f6b82af5165e5feb0f2e9076efa",
"size": "3955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "122735"
},
{
"name": "Python",
"bytes": "37175"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date:
# URL routes for the task-manager app.  Order matters: the details route is
# tried before the catch-all app_id route below it.
urlpatterns = [
    # e.g. <app>/details/<id> -- two positional groups passed to the view.
    url(r'^(\w+)/details/(\d+)', views.get_query_details, name='get_query_details'),
    # Catch-all: application id made of word characters and dashes.
    url(r'^(?P<app_id>[\w\-]+)', views.get_task_manager, name='get_task_manager'),
]
| {
"content_hash": "6afe23afdd0d797035342a02e1752e62",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.6490683229813664,
"repo_name": "ceos-seo/Data_Cube_v2",
"id": "2895caf273677f555c2676c5d5d7dcc72a8d2a96",
"size": "1327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/django_site_v2/data_cube_ui/apps/task_manager/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1959"
},
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "CSS",
"bytes": "772075"
},
{
"name": "GLSL",
"bytes": "165400"
},
{
"name": "HTML",
"bytes": "1457619"
},
{
"name": "JavaScript",
"bytes": "50036576"
},
{
"name": "Jupyter Notebook",
"bytes": "16917211"
},
{
"name": "Makefile",
"bytes": "6773"
},
{
"name": "Python",
"bytes": "1174107"
},
{
"name": "Shell",
"bytes": "7641"
}
],
"symlink_target": ""
} |
import datetime
from lxml import etree
import requests
from requests.exceptions import HTTPError
from .base import Item, Storage
from .http import HTTP_STORAGE_PARAMETERS, USERAGENT, prepare_auth, \
prepare_client_cert, prepare_verify
from .. import exceptions, log, utils
from ..utils.compat import text_type, to_native
# Module-level logger for all DAV storage/discovery code.
dav_logger = log.get(__name__)

# Timestamp format used inside CalDAV time-range filters.
CALDAV_DT_FORMAT = '%Y%m%dT%H%M%SZ'

# Percent-quoted forms of the URL-reserved characters; used to detect hrefs
# that must not be round-tripped through unquote/quote again.
_path_reserved_chars = frozenset(utils.compat.urlquote(x, '')
                                 for x in "/?#[]!$&'()*+,;=")
def _contains_quoted_reserved_chars(x):
    """Return True if *x* contains a percent-quoted URL-reserved character."""
    match = next((quoted for quoted in _path_reserved_chars if quoted in x),
                 None)
    if match is None:
        return False
    dav_logger.debug('Unsafe character: {!r}'.format(match))
    return True
def _normalize_href(base, href):
    '''Normalize the href to be a path only relative to hostname and
    schema.'''
    orig_href = href
    base = to_native(base, 'utf-8')
    href = to_native(href, 'utf-8')
    if not href:
        raise ValueError(href)
    # Resolve relative hrefs against the base and drop scheme/host/query.
    x = utils.compat.urlparse.urljoin(base, href)
    x = utils.compat.urlparse.urlsplit(x).path

    # Encoding issues:
    # - https://github.com/owncloud/contacts/issues/581
    # - https://github.com/Kozea/Radicale/issues/298
    # Repeatedly unquote+requote until the path reaches a fixed point; stop
    # early if unquoting ever exposed a reserved character, since requoting
    # it again would change the path's meaning.
    old_x = None
    while old_x is None or x != old_x:
        if _contains_quoted_reserved_chars(x):
            break
        old_x = x
        x = utils.compat.urlunquote(x)
        x = utils.compat.urlquote(x, '/@%:')

    if orig_href == x:
        dav_logger.debug('Already normalized: {!r}'.format(x))
    else:
        dav_logger.debug('Normalized URL from {!r} to {!r}'
                         .format(orig_href, x))

    return x
class InvalidXMLResponse(exceptions.InvalidResponse):
    """Raised when a server's XML response cannot be parsed at all."""
    pass
def _parse_xml(content):
    """Leniently parse raw XML bytes; raise InvalidXMLResponse if unusable."""
    parser = etree.XMLParser(recover=True)
    tree = etree.XML(content, parser=parser)
    if tree is None:
        raise InvalidXMLResponse('Invalid XML encountered: {}\n'
                                 'Double-check the URLs in your config.'
                                 .format(parser.error_log))
    if parser.error_log:
        # Recoverable errors: parsing succeeded, but warn the user.
        dav_logger.warning('Partially invalid XML response, some of your '
                           'items may be corrupted. Check the debug log and '
                           'consider switching servers. ({})'
                           .format(parser.error_log))
    return tree
def _merge_xml(items):
rv = items[0]
rv.extend(items[1:])
return rv
def _fuzzy_matches_mimetype(strict, weak):
# different servers give different getcontenttypes:
# "text/vcard", "text/x-vcard", "text/x-vcard; charset=utf-8",
# "text/directory;profile=vCard", "text/directory",
# "text/vcard; charset=utf-8"
if strict is None or weak is None:
return True
mediatype, subtype = strict.split('/')
if subtype in weak:
return True
return False
def _get_collection_from_url(url):
    """Return the unquoted last path segment of *url* (the collection name).

    The two-value unpack is deliberate: a URL without any slash raises
    ValueError, exactly as before.
    """
    _head, tail = url.rstrip('/').rsplit('/', 1)
    return utils.compat.urlunquote(tail)
class Discover(object):
    """Shared collection-discovery logic for DAV servers.

    Subclasses (CalDiscover/CardDiscover) fill in the protocol-specific
    class attributes below.
    """

    _namespace = None        # XML namespace of the DAV flavor
    _resourcetype = None     # resourcetype tag marking a usable collection
    _homeset_xml = None      # PROPFIND body requesting the home set
    _homeset_tag = None      # tag of the home set element in the response
    _well_known_uri = None   # well-known bootstrap path for discovery

    # PROPFIND body used to enumerate collections under the home set.
    _collection_xml = """
    <d:propfind xmlns:d="DAV:">
        <d:prop>
            <d:resourcetype />
        </d:prop>
    </d:propfind>
    """

    def __init__(self, **kwargs):
        # Discovery enumerates collections itself, so a pre-selected
        # ``collection`` argument is an error here.
        if kwargs.pop('collection', None) is not None:
            raise TypeError('collection argument must not be given.')
        # Split off the arguments DavSession understands; the full kwargs
        # dict is kept and merged back into each discovered storage config.
        discover_args, _ = utils.split_dict(kwargs, lambda key: key in (
            'url', 'username', 'password', 'verify', 'auth', 'useragent',
            'verify_fingerprint', 'auth_cert',
        ))
        self.session = DavSession(**discover_args)
        self.kwargs = kwargs

    def find_dav(self):
        """Return the redirect target of the well-known URI, or ''."""
        try:
            response = self.session.request(
                'GET', self._well_known_uri, allow_redirects=False,
                headers=self.session.get_default_headers()
            )
            return response.headers.get('Location', '')
        except (HTTPError, exceptions.Error):
            # The user might not have well-known URLs set up and instead points
            # vdirsyncer directly to the DAV server.
            dav_logger.debug('Server does not support well-known URIs.')
            return ''

    def find_principal(self, url=None):
        """Return the current-user-principal URL.

        With no URL given, try the configured base URL first and fall back
        to whatever the well-known URI redirects to.
        """
        if url is None:
            try:
                return self.find_principal('')
            except (HTTPError, exceptions.Error):
                return self.find_principal(self.find_dav())

        headers = self.session.get_default_headers()
        headers['Depth'] = 0
        body = """
        <d:propfind xmlns:d="DAV:">
            <d:prop>
                <d:current-user-principal />
            </d:prop>
        </d:propfind>
        """
        response = self.session.request('PROPFIND', url, headers=headers,
                                        data=body)
        root = _parse_xml(response.content)
        rv = root.find('.//{DAV:}current-user-principal/{DAV:}href')
        if rv is None:
            raise InvalidXMLResponse()
        return utils.compat.urlparse.urljoin(response.url, rv.text)

    def find_home(self, url=None):
        """Return the home-set URL (where the user's collections live)."""
        if url is None:
            url = self.find_principal()
        headers = self.session.get_default_headers()
        headers['Depth'] = 0
        response = self.session.request('PROPFIND', url,
                                        headers=headers,
                                        data=self._homeset_xml)
        root = etree.fromstring(response.content)
        # Better don't do string formatting here, because of XML namespaces
        rv = root.find('.//' + self._homeset_tag + '/{*}href')
        if rv is None:
            raise InvalidXMLResponse()
        return utils.compat.urlparse.urljoin(response.url, rv.text)

    def find_collections(self, url=None):
        """Yield ``{'href': ...}`` for every matching collection under *url*.

        Duplicate hrefs from the server are yielded only once.
        """
        if url is None:
            url = self.find_home()
        headers = self.session.get_default_headers()
        headers['Depth'] = 1
        r = self.session.request('PROPFIND', url, headers=headers,
                                 data=self._collection_xml)
        root = _parse_xml(r.content)
        done = set()
        for response in root.findall('{DAV:}response'):
            props = _merge_xml(response.findall('{*}propstat/{*}prop'))
            if props.find('{*}resourcetype/{*}' + self._resourcetype) is None:
                continue

            href = response.find('{*}href')
            if href is None:
                raise InvalidXMLResponse()
            href = utils.compat.urlparse.urljoin(r.url, href.text)
            if href not in done:
                done.add(href)
                yield {'href': href}

    def discover(self):
        """Yield a full storage-config dict per discovered collection."""
        for c in self.find_collections():
            url = c['href']
            collection = _get_collection_from_url(url)
            storage_args = dict(self.kwargs)
            storage_args.update({'url': url, 'collection': collection})
            yield storage_args

    def create(self, collection):
        """Return the config for *collection*, creating it if necessary."""
        if collection is None:
            collection = _get_collection_from_url(self.kwargs['url'])

        # Reuse an existing collection with that name if the server has one.
        for c in self.discover():
            if c['collection'] == collection:
                return c

        home = self.find_home()
        url = utils.compat.urlparse.urljoin(
            home,
            utils.compat.urlquote(collection, '/@')
        )

        try:
            url = self._create_collection_impl(url)
        except HTTPError as e:
            # Creation not supported/allowed by this server.
            raise NotImplementedError(e)
        else:
            rv = dict(self.kwargs)
            rv['collection'] = collection
            rv['url'] = url
            return rv

    def _create_collection_impl(self, url):
        # MKCOL with an extended body declaring the protocol-specific
        # resourcetype (calendar/addressbook).
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <D:mkcol xmlns:D="DAV:" xmlns:C="{}">
                <D:set>
                    <D:prop>
                        <D:resourcetype>
                            <D:collection/>
                            <C:{}/>
                        </D:resourcetype>
                    </D:prop>
                </D:set>
            </D:mkcol>
        '''.format(self._namespace, self._resourcetype)

        response = self.session.request(
            'MKCOL',
            url,
            data=data,
            headers=self.session.get_default_headers(),
        )
        return response.url
class CalDiscover(Discover):
    # CalDAV-specific constants: namespace, the resourcetype that marks a
    # calendar collection, and the well-known bootstrap path.
    _namespace = 'urn:ietf:params:xml:ns:caldav'
    _resourcetype = 'calendar'

    _homeset_xml = """
    <d:propfind xmlns:d="DAV:" xmlns:c="urn:ietf:params:xml:ns:caldav">
        <d:prop>
            <c:calendar-home-set />
        </d:prop>
    </d:propfind>
    """
    _homeset_tag = '{*}calendar-home-set'
    _well_known_uri = '/.well-known/caldav/'
class CardDiscover(Discover):
    # CardDAV-specific constants: namespace, the resourcetype that marks an
    # addressbook collection, and the well-known bootstrap path.
    _namespace = 'urn:ietf:params:xml:ns:carddav'
    _resourcetype = 'addressbook'

    _homeset_xml = """
    <d:propfind xmlns:d="DAV:" xmlns:c="urn:ietf:params:xml:ns:carddav">
        <d:prop>
            <c:addressbook-home-set />
        </d:prop>
    </d:propfind>
    """
    _homeset_tag = '{*}addressbook-home-set'
    _well_known_uri = '/.well-known/carddav/'
class DavSession(object):
    '''
    A helper class to connect to DAV servers.
    '''

    def __init__(self, url, username='', password='', verify=True, auth=None,
                 useragent=USERAGENT, verify_fingerprint=None,
                 auth_cert=None):
        # A username without a password triggers the project's password
        # helper; an empty username leaves auth untouched.
        if username and not password:
            from ..utils.password import get_password
            password = get_password(username, url)

        settings = {
            'auth': prepare_auth(auth, username, password),
            'cert': prepare_client_cert(auth_cert),
        }
        settings.update(prepare_verify(verify, verify_fingerprint))
        self._settings = settings

        self.useragent = useragent
        # Normalize to exactly one trailing slash so urljoin is predictable.
        self.url = url.rstrip('/') + '/'
        self.parsed_url = utils.compat.urlparse.urlparse(self.url)
        self._session = None

    def request(self, method, path, **kwargs):
        """Send an HTTP request for *path* relative to the base URL."""
        if path:
            target = utils.compat.urlparse.urljoin(self.url, path)
        else:
            target = self.url

        # The underlying requests session is created lazily on first use.
        if self._session is None:
            self._session = requests.session()

        options = dict(self._settings)
        options.update(kwargs)
        return utils.http.request(method, target, session=self._session,
                                  **options)

    def get_default_headers(self):
        """Headers sent with every DAV request unless overridden."""
        return {
            'User-Agent': self.useragent,
            'Content-Type': 'application/xml; charset=UTF-8'
        }
class DavStorage(Storage):

    # The docstring is assembled at class-creation time so shared HTTP
    # parameter docs can be injected between the two literals.
    __doc__ = '''
    :param url: Base URL or an URL to a collection.
    ''' + HTTP_STORAGE_PARAMETERS + '''
    :param unsafe_href_chars: Replace the given characters when generating
        hrefs. Defaults to ``'@'``.

    .. note::
        Please also see :ref:`supported-servers`, as some servers may not work
        well.
    '''

    # the file extension of items. Useful for testing against radicale.
    fileext = None
    # mimetype of items
    item_mimetype = None
    # XML to use when fetching multiple hrefs.
    get_multi_template = None
    # The LXML query for extracting results in get_multi
    get_multi_data_query = None
    # The Discover subclass to use
    discovery_class = None

    _session = None
    _repr_attributes = ('username', 'url')

    # Maps metadata keys to (tagname, XML namespace) for get_meta/set_meta.
    _property_table = {
        'displayname': ('displayname', 'DAV:'),
    }

    def __init__(self, url, username='', password='', verify=True, auth=None,
                 useragent=USERAGENT, verify_fingerprint=None, auth_cert=None,
                 **kwargs):
        super(DavStorage, self).__init__(**kwargs)
        url = url.rstrip('/') + '/'
        self.session = DavSession(url, username, password, verify, auth,
                                  useragent, verify_fingerprint,
                                  auth_cert)

        # defined for _repr_attributes
        self.username = username
        self.url = url

    @classmethod
    def discover(cls, **kwargs):
        """Yield storage configs for all collections found on the server."""
        d = cls.discovery_class(**kwargs)
        return d.discover()

    @classmethod
    def create_collection(cls, collection, **kwargs):
        """Create (or find) *collection* and return its storage config."""
        d = cls.discovery_class(**kwargs)
        return d.create(collection)

    def _normalize_href(self, *args, **kwargs):
        # Delegate to the module-level helper, anchored at the session URL.
        return _normalize_href(self.session.url, *args, **kwargs)

    def _get_href(self, item):
        # Derive a server path from the item's ident plus the file extension.
        href = utils.generate_href(item.ident)
        return self._normalize_href(href + self.fileext)

    def _is_item_mimetype(self, mimetype):
        return _fuzzy_matches_mimetype(self.item_mimetype, mimetype)

    def get(self, href):
        """Fetch a single item; returns (item, etag)."""
        ((actual_href, item, etag),) = self.get_multi([href])
        assert href == actual_href
        return item, etag

    def get_multi(self, hrefs):
        """Fetch several items at once via a multiget REPORT.

        Returns a list of (href, Item, etag); raises NotFoundError for any
        href the server did not return.
        """
        hrefs = set(hrefs)
        href_xml = []
        for href in hrefs:
            # Non-normalized hrefs cannot have come from this storage.
            if href != self._normalize_href(href):
                raise exceptions.NotFoundError(href)
            href_xml.append('<D:href>{}</D:href>'.format(href))
        if not href_xml:
            return ()

        data = self.get_multi_template.format(hrefs='\n'.join(href_xml))
        response = self.session.request(
            'REPORT',
            '',
            data=data,
            headers=self.session.get_default_headers()
        )
        root = _parse_xml(response.content)  # etree only can handle bytes
        rv = []
        hrefs_left = set(hrefs)
        for href, etag, prop in self._parse_prop_responses(root):
            raw = prop.find(self.get_multi_data_query)
            if raw is None:
                dav_logger.warning('Skipping {}, the item content is missing.'
                                   .format(href))
                continue

            raw = raw.text or u''

            # Some responses arrive as bytes; decode with the HTTP encoding.
            if isinstance(raw, bytes):
                raw = raw.decode(response.encoding)
            if isinstance(etag, bytes):
                etag = etag.decode(response.encoding)

            try:
                hrefs_left.remove(href)
            except KeyError:
                if href in hrefs:
                    dav_logger.warning('Server sent item twice: {}'
                                       .format(href))
                else:
                    dav_logger.warning('Server sent unsolicited item: {}'
                                       .format(href))
            else:
                rv.append((href, Item(raw), etag))
        for href in hrefs_left:
            raise exceptions.NotFoundError(href)

        return rv

    def _put(self, href, item, etag):
        """PUT *item* at *href*; returns (href, etag).

        etag=None means "create only" (If-None-Match: *); otherwise the
        upload is conditional on the given etag (If-Match).
        """
        headers = self.session.get_default_headers()
        headers['Content-Type'] = self.item_mimetype
        if etag is None:
            headers['If-None-Match'] = '*'
        else:
            headers['If-Match'] = etag

        response = self.session.request(
            'PUT',
            href,
            data=item.raw.encode('utf-8'),
            headers=headers
        )
        etag = response.headers.get('etag', None)
        href = self._normalize_href(response.url)

        if not etag:
            # The server violated the RFC and didn't send an etag. This is
            # technically a race-condition, but too many popular servers do it.
            #
            # ownCloud: https://github.com/owncloud/contacts/issues/920
            dav_logger.debug('Server did not send etag, fetching {!r}'
                             .format(href))
            item2, etag = self.get(href)
            # We don't have the old etag, but we can sloppily compare item
            # contents to see if the values changed.
            if item2.hash != item.hash:
                dav_logger.debug('Old content: {!r}'.format(item.raw))
                dav_logger.debug('New content: {!r}'.format(item2.raw))
                # NOTE(review): the '{!r}' below is never .format()-ed, so it
                # appears literally in the error message -- confirm intent.
                raise exceptions.WrongEtagError(
                    'While requesting the etag for {!r}, '
                    'the item content changed.'
                )
        return href, etag

    def update(self, href, item, etag):
        """Replace an existing item; returns the new etag."""
        if etag is None:
            raise ValueError('etag must be given and must not be None.')
        href, etag = self._put(self._normalize_href(href), item, etag)
        return etag

    def upload(self, item):
        """Create a new item; returns (href, etag)."""
        href = self._get_href(item)
        return self._put(href, item, None)

    def delete(self, href, etag):
        """Delete *href*, conditional on *etag* (If-Match)."""
        href = self._normalize_href(href)
        headers = self.session.get_default_headers()
        headers.update({
            'If-Match': etag
        })

        self.session.request(
            'DELETE',
            href,
            headers=headers
        )

    def _parse_prop_responses(self, root):
        """Yield (href, etag, props) for every usable item in a multistatus.

        Skips duplicates, collections, responses without properties, and
        entries whose content type does not match this storage's items.
        """
        hrefs = set()
        for response in root.iter('{DAV:}response'):
            href = response.find('{DAV:}href')
            if href is None:
                dav_logger.error('Skipping response, href is missing.')
                continue

            href = self._normalize_href(href.text)
            if href in hrefs:
                # Servers that send duplicate hrefs:
                # - Zimbra
                #   https://github.com/untitaker/vdirsyncer/issues/88
                # - Davmail
                #   https://github.com/untitaker/vdirsyncer/issues/144
                dav_logger.warning('Skipping identical href: {!r}'
                                   .format(href))
                continue

            props = response.findall('{DAV:}propstat/{DAV:}prop')
            if not props:
                dav_logger.warning('Skipping {!r}, properties are missing.'
                                   .format(href))
                continue
            else:
                props = _merge_xml(props)

            if props.find('{DAV:}resourcetype/{DAV:}collection') is not None:
                dav_logger.debug('Skipping {!r}, is collection.'.format(href))
                continue

            etag = getattr(props.find('{DAV:}getetag'), 'text', '')
            if not etag:
                dav_logger.warning('Skipping {!r}, etag property is missing.'
                                   .format(href))

            contenttype = getattr(props.find('{DAV:}getcontenttype'),
                                  'text', None)
            if not self._is_item_mimetype(contenttype):
                dav_logger.debug('Skipping {!r}, {!r} != {!r}.'
                                 .format(href, contenttype,
                                         self.item_mimetype))
                continue

            hrefs.add(href)
            yield href, etag, props

    def list(self):
        """Yield (href, etag) for every item in the collection."""
        headers = self.session.get_default_headers()
        headers['Depth'] = 1
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <D:propfind xmlns:D="DAV:">
                <D:prop>
                    <D:resourcetype/>
                    <D:getcontenttype/>
                    <D:getetag/>
                </D:prop>
            </D:propfind>
            '''

        # We use a PROPFIND request instead of addressbook-query due to issues
        # with Zimbra. See https://github.com/untitaker/vdirsyncer/issues/83
        response = self.session.request('PROPFIND', '', data=data,
                                        headers=headers)
        root = _parse_xml(response.content)
        rv = self._parse_prop_responses(root)
        for href, etag, prop in rv:
            yield href, etag

    def get_meta(self, key):
        """Return a collection property (e.g. displayname), or None."""
        try:
            tagname, namespace = self._property_table[key]
        except KeyError:
            raise exceptions.UnsupportedMetadataError()

        lxml_selector = '{%s}%s' % (namespace, tagname)
        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <D:propfind xmlns:D="DAV:">
                <D:prop>
                    {}
                </D:prop>
            </D:propfind>
        '''.format(
            to_native(etree.tostring(etree.Element(lxml_selector)))
        )

        headers = self.session.get_default_headers()
        headers['Depth'] = 0

        response = self.session.request(
            'PROPFIND', '',
            data=data, headers=headers
        )

        root = _parse_xml(response.content)

        # Return the first non-empty value found for the property.
        for prop in root.findall('.//' + lxml_selector):
            text = getattr(prop, 'text', None)
            if text:
                return text

    def set_meta(self, key, value):
        """Set a collection property via PROPPATCH."""
        try:
            tagname, namespace = self._property_table[key]
        except KeyError:
            raise exceptions.UnsupportedMetadataError()

        lxml_selector = '{%s}%s' % (namespace, tagname)
        element = etree.Element(lxml_selector)
        element.text = value

        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <D:propertyupdate xmlns:D="DAV:">
                <D:set>
                    <D:prop>
                        {}
                    </D:prop>
                </D:set>
            </D:propertyupdate>
        '''.format(to_native(etree.tostring(element)))

        self.session.request(
            'PROPPATCH', '',
            data=data, headers=self.session.get_default_headers()
        )

        # XXX: Response content is currently ignored. Though exceptions are
        # raised for HTTP errors, a multistatus with errorcodes inside is not
        # parsed yet. Not sure how common those are, or how they look like. It
        # might be easier (and safer in case of a stupid server) to just issue
        # a PROPFIND to see if the value got actually set.
class CaldavStorage(DavStorage):

    # Docstring is assembled at class-creation time so the shared DavStorage
    # parameter docs are appended.
    __doc__ = '''
    CalDAV.

    You can set a timerange to synchronize with the parameters ``start_date``
    and ``end_date``. Inside those parameters, you can use any Python
    expression to return a valid :py:class:`datetime.datetime` object. For
    example, the following would synchronize the timerange from one year in the
    past to one year in the future::

        start_date = datetime.now() - timedelta(days=365)
        end_date = datetime.now() + timedelta(days=365)

    Either both or none have to be specified. The default is to synchronize
    everything.

    You can set ``item_types`` to restrict the *kind of items* you want to
    synchronize. For example, if you want to only synchronize events (but don't
    download any tasks from the server), set ``item_types = ["VEVENT"]``. If
    you want to synchronize events and tasks, but have some ``VJOURNAL`` items
    on the server you don't want to synchronize, use ``item_types = ["VEVENT",
    "VTODO"]``.

    :param start_date: Start date of timerange to show, default -inf.
    :param end_date: End date of timerange to show, default +inf.
    :param item_types: Kind of items to show. The default, the empty list, is
        to show all. This depends on particular features on the server, the
        results are not validated.
    ''' + DavStorage.__doc__

    storage_name = 'caldav'
    fileext = '.ics'
    item_mimetype = 'text/calendar'
    discovery_class = CalDiscover

    start_date = None
    end_date = None

    # REPORT body for fetching several items by href.
    get_multi_template = '''<?xml version="1.0" encoding="utf-8" ?>
        <C:calendar-multiget xmlns:D="DAV:"
            xmlns:C="urn:ietf:params:xml:ns:caldav">
            <D:prop>
                <D:getetag/>
                <C:calendar-data/>
            </D:prop>
            {hrefs}
        </C:calendar-multiget>'''

    get_multi_data_query = '{urn:ietf:params:xml:ns:caldav}calendar-data'

    _property_table = dict(DavStorage._property_table)
    _property_table.update({
        'color': ('calendar-color', 'http://apple.com/ns/ical/'),
    })

    def __init__(self, start_date=None, end_date=None,
                 item_types=(), **kwargs):
        super(CaldavStorage, self).__init__(**kwargs)
        if not isinstance(item_types, (list, tuple)):
            raise exceptions.UserError('item_types must be a list.')

        self.item_types = tuple(item_types)
        if (start_date is None) != (end_date is None):
            raise exceptions.UserError('If start_date is given, '
                                       'end_date has to be given too.')
        elif start_date is not None and end_date is not None:
            # SECURITY NOTE: string dates are eval'd as Python expressions
            # with the datetime module's names in scope (documented feature,
            # values come from the user's configuration).
            namespace = dict(datetime.__dict__)
            namespace['start_date'] = self.start_date = \
                (eval(start_date, namespace)
                 if isinstance(start_date, (bytes, text_type))
                 else start_date)
            self.end_date = \
                (eval(end_date, namespace)
                 if isinstance(end_date, (bytes, text_type))
                 else end_date)

    @staticmethod
    def _get_list_filters(components, start, end):
        """Yield calendar-query comp-filter XML for each component.

        With no components but a timerange given, default to filtering
        VTODO and VEVENT.
        """
        if components:
            caldavfilter = '''
                <C:comp-filter name="VCALENDAR">
                    <C:comp-filter name="{component}">
                        {timefilter}
                    </C:comp-filter>
                </C:comp-filter>
                '''

            if start is not None and end is not None:
                start = start.strftime(CALDAV_DT_FORMAT)
                end = end.strftime(CALDAV_DT_FORMAT)
                timefilter = ('<C:time-range start="{start}" end="{end}"/>'
                              .format(start=start, end=end))
            else:
                timefilter = ''

            for component in components:
                yield caldavfilter.format(component=component,
                                          timefilter=timefilter)
        else:
            if start is not None and end is not None:
                for x in CaldavStorage._get_list_filters(('VTODO', 'VEVENT'),
                                                         start, end):
                    yield x

    def list(self):
        caldavfilters = list(self._get_list_filters(
            self.item_types,
            self.start_date,
            self.end_date
        ))
        if not caldavfilters:
            # If we don't have any filters (which is the default), taking the
            # risk of sending a calendar-query is not necessary. There doesn't
            # seem to be a widely-usable way to send calendar-queries with the
            # same semantics as a PROPFIND request... so why not use PROPFIND
            # instead?
            #
            # See https://github.com/dmfs/tasks/issues/118 for backstory.
            for x in DavStorage.list(self):
                yield x
            # Note: execution falls through; the loop below iterates zero
            # times because caldavfilters is empty.

        data = '''<?xml version="1.0" encoding="utf-8" ?>
            <C:calendar-query xmlns:D="DAV:"
                xmlns:C="urn:ietf:params:xml:ns:caldav">
                <D:prop>
                    <D:getcontenttype/>
                    <D:getetag/>
                </D:prop>
                <C:filter>
                {caldavfilter}
                </C:filter>
            </C:calendar-query>'''

        headers = self.session.get_default_headers()
        # https://github.com/untitaker/vdirsyncer/issues/166
        # The default in CalDAV's calendar-queries is 0, but the examples use
        # an explicit value of 1 for querying items. it is extremely unclear in
        # the spec which values from WebDAV are actually allowed.
        headers['Depth'] = 1

        for caldavfilter in caldavfilters:
            xml = data.format(caldavfilter=caldavfilter)
            response = self.session.request('REPORT', '', data=xml,
                                            headers=headers)
            root = _parse_xml(response.content)
            rv = self._parse_prop_responses(root)
            for href, etag, prop in rv:
                yield href, etag
class CarddavStorage(DavStorage):

    # Docstring is assembled at class-creation time so the shared DavStorage
    # parameter docs are appended.
    __doc__ = '''
    CardDAV.
    ''' + DavStorage.__doc__

    storage_name = 'carddav'
    fileext = '.vcf'
    item_mimetype = 'text/vcard'
    discovery_class = CardDiscover

    # REPORT body for fetching several vcards by href.
    get_multi_template = '''<?xml version="1.0" encoding="utf-8" ?>
        <C:addressbook-multiget xmlns:D="DAV:"
                xmlns:C="urn:ietf:params:xml:ns:carddav">
            <D:prop>
                <D:getetag/>
                <C:address-data/>
            </D:prop>
            {hrefs}
        </C:addressbook-multiget>'''

    get_multi_data_query = '{urn:ietf:params:xml:ns:carddav}address-data'
| {
"content_hash": "4f005e95685a7b42839051b7aa8e204c",
"timestamp": "",
"source": "github",
"line_count": 821,
"max_line_length": 79,
"avg_line_length": 34.364190012180266,
"alnum_prop": 0.5374827207315777,
"repo_name": "tribut/vdirsyncer",
"id": "a778b1b8e95af842b76c7291d51933d7a504e88c",
"size": "28238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vdirsyncer/storage/dav.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1801"
},
{
"name": "Python",
"bytes": "209433"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
} |
# Sphinx extensions enabled for this documentation build.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Formation'
copyright = u'2017, James Routley'
author = u'James Routley'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"font_size": "14px",
"font_family": (
'-apple-system, BlinkMacSystemFont, '
'"Segoe UI", Helvetica, Arial, sans-serif, '
'"Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
),
"head_font_family": (
'-apple-system, BlinkMacSystemFont, '
'"Segoe UI", Helvetica, Arial, sans-serif, '
'"Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
),
"code_font_family":
'Consolas, "Liberation Mono", Menlo, Courier, monospace'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Formationdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Formation.tex', u'Formation Documentation',
u'James Routley', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'formation', u'Formation Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Formation', u'Formation Documentation',
author, 'Formation', 'One line description of project.',
'Miscellaneous'),
]
| {
"content_hash": "7d8fd9a01f5a63a0cf7eeb9f4d07551c",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 31.036496350364963,
"alnum_prop": 0.6549858889934148,
"repo_name": "jamesroutley/formation",
"id": "76dd126a47de05a8abb0be74aa745e970e20b4d2",
"size": "5306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "29555"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import xml.etree.ElementTree as ET
import pynos.versions.base.lldp
import pynos.utilities
class TestLLDP(unittest.TestCase):
    """
    LLDP unit tests. Compare expected XML to generated XML.
    """
    def setUp(self):
        # Wire the LLDP object to a callback that echoes back the XML it is
        # given, so no real NETCONF session is required.
        self.lldp = pynos.versions.base.lldp.LLDP(pynos.utilities.return_xml)
        self.namespace = 'urn:brocade.com:mgmt:brocade-lldp-ext'
        self.netconf_namespace = 'urn:ietf:params:xml:ns:netconf:base:1.0'
    def lldp_neighbors_xml(self, *args):
        # Canned <rpc-reply> describing exactly one LLDP neighbor; installed
        # as the LLDP callback in test_neighbors below to stand in for a
        # device response.
        message_id = 'urn:uuid:528cdf32-2e86-11e5-bb27-080027b782e4'
        neighbor_xml = '<ns0:rpc-reply xmlns:ns0="{0}" xmlns:ns1="{1}" '\
                       'message-id="{2}"><ns1:lldp-neighbor-detail>'\
                       '<ns1:local-interface-name>Te 226/0/7'\
                       '</ns1:local-interface-name>'\
                       '<ns1:local-interface-ifindex>402882566'\
                       '</ns1:local-interface-ifindex>'\
                       '<ns1:local-interface-mac>0005.33e5.d764'\
                       '</ns1:local-interface-mac><ns1:remote-interface-name>'\
                       'port0</ns1:remote-interface-name>'\
                       '<ns1:remote-interface-mac>8c7c.ff02.f100'\
                       '</ns1:remote-interface-mac><ns1:dead-interval>120'\
                       '</ns1:dead-interval><ns1:remaining-life>102'\
                       '</ns1:remaining-life><ns1:remote-chassis-id>'\
                       '8c7c.ff02.f100</ns1:remote-chassis-id>'\
                       '<ns1:lldp-pdu-transmitted>5397'\
                       '</ns1:lldp-pdu-transmitted><ns1:lldp-pdu-received>'\
                       '5263</ns1:lldp-pdu-received>'\
                       '</ns1:lldp-neighbor-detail><ns1:has-more>false'\
                       '</ns1:has-more>'\
                       '</ns0:rpc-reply>'.format(self.netconf_namespace,
                                                 self.namespace,
                                                 message_id)
        return ET.fromstring(neighbor_xml)
    def test_neighbors(self):
        # NOTE(review): 'remote-system-name' does not appear in the canned
        # XML above, so 'placeholder' is presumably the default that
        # LLDP.neighbors supplies for a missing field -- confirm against the
        # implementation.
        expected = {'local-int-name': 'Te 226/0/7',
                    'local-int-mac': '0005.33e5.d764',
                    'remote-int-name': 'port0',
                    'remote-int-mac': '8c7c.ff02.f100',
                    'remote-chassis-id': '8c7c.ff02.f100',
                    'remote-system-name': 'placeholder'}
        self.lldp._callback = self.lldp_neighbors_xml
        results = self.lldp.neighbors
        self.assertIsInstance(results, list)
        self.assertDictEqual(expected, results[0])
| {
"content_hash": "d7240477c4e6cfb62af31d312f89c727",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 46.11594202898551,
"alnum_prop": 0.5725958516656191,
"repo_name": "SivagnanamCiena/pynos",
"id": "b876d48abbca03a123a911c6ce585644a2fc5901",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/versions/base/test_lldp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20665905"
}
],
"symlink_target": ""
} |
index = []
def add_to_index(index, keyword, url):
    """Add url under keyword in index, an in-place keyword index.

    index is a list of [keyword, [url, ...]] pairs. If keyword is already
    present its url list is extended; otherwise a new entry is appended.

    Fixes the original implementation, which tracked a flag and a manual
    counter and kept scanning the whole list even after a match was found.
    """
    for entry in index:
        if entry[0] == keyword:
            entry[1].append(url)
            return  # keyword found; nothing else to do
    # Keyword not present yet: start a new entry.
    index.append([keyword, [url]])
# Manual smoke test of add_to_index (Python 2 print statements); the
# expected output is recorded in the string literal below.
add_to_index(index,'udacity','http://udacity.com')
print index
add_to_index(index,'udacity','http://mmnpr.org')
print index
add_to_index(index,'computing','http://acm.org')
print index
add_to_index(index,'udacity','http://npr.org')
print index
"""
output
[['udacity', ['http://udacity.com']]]
[['udacity', ['http://udacity.com', 'http://mmnpr.org']]]
[['udacity', ['http://udacity.com', 'http://mmnpr.org']], ['computing', ['http://acm.org']]]
[['udacity', ['http://udacity.com', 'http://mmnpr.org', 'http://npr.org']], ['computing', ['http://acm.org']]]
"""
# Reference solution kept for comparison with the implementation above.
"""
answer code
def add_to_index(index,keyword,url):
    for entry in index:
        if entry[0] == keyword:
            entry[1].append(url)
            return
    index.append([keyword,[url]])
"""
| {
"content_hash": "7148628a551ab534c3ec3bd9d42f803b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 110,
"avg_line_length": 24.976744186046513,
"alnum_prop": 0.5577281191806331,
"repo_name": "akshaynagpal/python_web_crawler",
"id": "21066fd83ac9c60443c569689ed2e99e620c04f2",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web crawler functions/add_to_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18257"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2010-2013, Ryan Fan <ryan.fan@oracle.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import absolute_import
from celery import Celery
# Celery application wired to a local Redis instance for both the message
# broker and the result backend.
client = Celery()
client.conf.update(
    BROKER_URL='redis://guest@127.0.0.1:6379//',
    CELERY_RESULT_BACKEND='redis://127.0.0.1:6379/0',
    # JSON-only serialization for both task payloads and results.
    CELERY_TASK_SERIALIZER='json',
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_RESULT_SERIALIZER='json',
    CELERY_TIMEZONE='Asia/Shanghai',
)
| {
"content_hash": "0ce90fef0e5a7d76fee60dd5d0ebe338",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 37.233333333333334,
"alnum_prop": 0.7421665174574754,
"repo_name": "rfancn/wxgigo",
"id": "61fa7ba0520d79cc73edf9fd416d3330cdcbffa5",
"size": "1154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wxgigo/wxmp/misc/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2696"
},
{
"name": "HTML",
"bytes": "50544"
},
{
"name": "JavaScript",
"bytes": "4201"
},
{
"name": "Python",
"bytes": "356710"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
} |
from lib import action
import json
class ConsulAclListAction(action.ConsulBaseAction):
    """StackStorm action that lists all Consul ACL tokens as JSON.

    NOTE(review): the result is printed to stdout rather than returned from
    run(); presumably the action framework captures stdout -- confirm whether
    returning the structure would be the intended contract here.
    """
    def run(self):
        # self.consul is a python-consul client provided by ConsulBaseAction
        # (defined elsewhere in this pack).
        acl_list = self.consul.acl.list()
        print json.dumps(acl_list)
| {
"content_hash": "d8907666b4cd0b48b397086989a35b1a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 51,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6989247311827957,
"repo_name": "pearsontechnology/st2contrib",
"id": "e17f6cd18cac169cd577e12dceac29f8414b1a68",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packs/consul/actions/list_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "HTML",
"bytes": "675"
},
{
"name": "Makefile",
"bytes": "4592"
},
{
"name": "Python",
"bytes": "913112"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "16263"
}
],
"symlink_target": ""
} |
import hashlib
import random
from django.core.mail import send_mail
from django.template.loader import render_to_string
from account.conf import settings
class AccountDefaultHookSet(object):
    """Default implementations of the account app's extension hooks.

    Covers outbound transactional email, random-token generation, login
    credential extraction and account-deletion lifecycle callbacks.
    Projects can swap in their own hookset via settings.ACCOUNT_HOOKSET
    (dispatched through HookProxy elsewhere in this module).
    """

    def send_invitation_email(self, to, ctx):
        """Render and send the "invite user" email to the addresses in ``to``."""
        subject = render_to_string("account/email/invite_user_subject.txt", ctx)
        # Consistency fix: the other three senders collapse the rendered
        # subject onto a single line. Newlines are not valid in a Subject
        # header (Django's send_mail rejects them with BadHeaderError).
        subject = "".join(subject.splitlines())
        message = render_to_string("account/email/invite_user.txt", ctx)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)

    def send_confirmation_email(self, to, ctx):
        """Render and send the email-address-confirmation email."""
        subject = render_to_string("account/email/email_confirmation_subject.txt", ctx)
        subject = "".join(subject.splitlines())  # remove superfluous line breaks
        message = render_to_string("account/email/email_confirmation_message.txt", ctx)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)

    def send_password_change_email(self, to, ctx):
        """Render and send the "your password was changed" notification."""
        subject = render_to_string("account/email/password_change_subject.txt", ctx)
        subject = "".join(subject.splitlines())
        message = render_to_string("account/email/password_change.txt", ctx)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)

    def send_password_reset_email(self, to, ctx):
        """Render and send the password-reset email."""
        subject = render_to_string("account/email/password_reset_subject.txt", ctx)
        subject = "".join(subject.splitlines())
        message = render_to_string("account/email/password_reset.txt", ctx)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)

    def generate_random_token(self, extra=None, hash_func=hashlib.sha256):
        """Return a hex token hashed from 512 random bits plus ``extra``.

        :param extra: optional list of strings mixed into the hash input.
        :param hash_func: hash constructor; sha256 yields a 64-char token.
        """
        if extra is None:
            extra = []
        # SystemRandom draws from the OS CSPRNG, so tokens are unguessable.
        bits = extra + [str(random.SystemRandom().getrandbits(512))]
        return hash_func("".join(bits).encode("utf-8")).hexdigest()

    def generate_signup_code_token(self, email=None):
        """Token for a signup code, tied to the invitee email when given."""
        extra = []
        if email:
            extra.append(email)
        return self.generate_random_token(extra)

    def generate_email_confirmation_token(self, email):
        """Token used to confirm ownership of ``email``."""
        return self.generate_random_token([email])

    def get_user_credentials(self, form, identifier_field):
        """Map a validated login form onto authenticate() keyword arguments."""
        return {
            "username": form.cleaned_data[identifier_field],
            "password": form.cleaned_data["password"],
        }

    def account_delete_mark(self, deletion):
        """Soft-delete step: deactivate the user pending expunge."""
        deletion.user.is_active = False
        deletion.user.save()

    def account_delete_expunge(self, deletion):
        """Hard-delete step: permanently remove the user record."""
        deletion.user.delete()
class HookProxy(object):
    """Lazily forward every attribute access to the hookset configured in
    settings.ACCOUNT_HOOKSET, so the active hookset can be swapped via
    settings without callers re-importing anything."""
    def __getattr__(self, attr):
        return getattr(settings.ACCOUNT_HOOKSET, attr)
# Module-level singleton through which application code invokes the hooks.
hookset = HookProxy()
| {
"content_hash": "08da7ae526041f8e35eac0b27d909771",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 87,
"avg_line_length": 36.885714285714286,
"alnum_prop": 0.668861347792409,
"repo_name": "GeoNode/geonode-user-accounts",
"id": "606eee73d4ab1fd413f8797d124aed63f570f542",
"size": "2582",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "account/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "617"
},
{
"name": "Python",
"bytes": "222386"
}
],
"symlink_target": ""
} |
"""
.. module:: colorsParser
:synopsis: Parse the colors.xml file and return a list of Color objects
grouped by color group.
.. moduleauthor:: Hazen Babcock
"""
import os
from xml.etree import ElementTree
class Color(object):
    """
    Color information class.

    Wraps one <color> node from colors.xml and exposes its face/edge
    colors as RGBA channel lists.
    """
    def __init__(self, color_node):
        """
        :param color_node: A colors.xml ElementTree XML node containing the color information.
        :type color_node: ElementTree.
        """
        self.code = color_node.attrib["code"]
        self.edge = color_node.attrib["edge"]
        self.lego_color = color_node.attrib["lego_color"]
        self.lego_id = color_node.attrib["lego_id"]
        self.name = color_node.attrib["name"]
        self.value = color_node.attrib["value"]

    def getDescription(self):
        """Return 'code, name', e.g. '4, Red'."""
        return self.code + ", " + self.name

    def getEdgeColor(self, scale = "1"):
        """
        :param scale: (Optional) either "1" or "256".
        :type scale: str.
        :returns: list -- [r, g, b, a]
        """
        return self.parseColor(self.edge, scale)

    def getFaceColor(self, scale = "1"):
        """
        :param scale: (Optional) either "1" or "256".
        :type scale: str.
        :returns: list -- [r, g, b, a]
        """
        return self.parseColor(self.value, scale)

    def parseColor(self, color_string, scale = "1"):
        """
        :param color_string: A color string like "256,256,256,256".
        :type color_string: str.
        :param scale: (Optional) "256" or "1", defaults to "1".
        :type scale: str.
        :returns: list -- [r, g, b, a]
        """
        # Bug fix: the original returned map(...), which under Python 3 is a
        # lazy iterator (not indexable / not comparable); list comprehensions
        # give a real list on both Python 2 and 3.
        channels = color_string.split(",")
        if (scale == "1"):
            return [float(x) / 256.0 for x in channels]
        else:
            return [int(x) for x in channels]
def loadColors(colors_file = None):
    """
    Parse a colors.xml file and return a dictionary of Color objects
    keyed by the color code.

    :param colors_file: (Optional) path to a colors.xml file; defaults to
        the bundled xml/colors.xml.
    """
    # Bug fix: colors_file was accepted but never forwarded, so a caller's
    # explicit path was silently ignored.
    color_xml = loadColorsFile(colors_file)
    all_colors = {}
    for color_group in color_xml.find("colors"):
        # (The original also built an unused per-group list here; removed.)
        for color_entry in color_group:
            color_obj = Color(color_entry)
            all_colors[color_obj.code] = color_obj
    return all_colors
def loadColorsFile(colors_file = None):
    """Parse colors_file (or the bundled xml/colors.xml) and return the XML root."""
    if colors_file is None:
        # The colors.xml file is assumed to exist in the xml directory, one directory above this module.
        package_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        colors_file = package_dir + "/xml/colors.xml"
    return ElementTree.parse(colors_file).getroot()
def loadColorGroups(colors_file = None):
    """
    Parse a colors.xml file and return a list of Color objects
    grouped by color group.

    :param colors_file: (Optional) path to a colors.xml file; defaults to
        the bundled xml/colors.xml.
    """
    # Bug fix: colors_file was accepted but never forwarded to
    # loadColorsFile, so a caller's explicit path was silently ignored.
    color_xml = loadColorsFile(colors_file)
    all_colors = []
    for color_group in color_xml.find("colors"):
        cur_group = [Color(color_entry) for color_entry in color_group]
        all_colors.append(cur_group)
    return all_colors
if (__name__ == '__main__'):
    # Smoke test: print every color code and name from the bundled colors.xml.
    all_colors = loadColors()
    for key in sorted(all_colors.keys()):
        print(key, all_colors[key].name)
#
# The MIT License
#
# Copyright (c) 2014 Hazen Babcock
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| {
"content_hash": "63f59611f5fa177043d8c234f1e03f11",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 104,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6323805060918463,
"repo_name": "HazenBabcock/opensdraw",
"id": "12c141557d386bcda8fe067ac84448bd760f4b86",
"size": "4290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opensdraw/lcad_lib/colorsParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "5595"
},
{
"name": "Python",
"bytes": "358957"
}
],
"symlink_target": ""
} |
from yapsy.IPlugin import IPlugin
from yapsy.PluginManager import PluginManager
from django.template import loader, Context
from django.db.models import Count
from server.models import *
from django.shortcuts import get_object_or_404
import server.utils as utils
class MunkiVersion(IPlugin):
    """Sal dashboard widget that charts machines by installed Munki version."""
    def plugin_type(self):
        # Built-in plugin (ships with Sal) rather than a user-supplied one.
        return 'builtin'
    def show_widget(self, page, machines=None, theid=None):
        # The data is pulled from the database and passed to a template.
        # There are three possible views we're going to be rendering to - front, bu_dashbaord and group_dashboard. If page is set to bu_dashboard, or group_dashboard, you will be passed a business_unit or machine_group id to use (mainly for linking to the right search).
        if page == 'front':
            t = loader.get_template('munkiversion/templates/front.html')
            if not machines:
                machines = Machine.objects.all()
        if page == 'bu_dashboard':
            t = loader.get_template('munkiversion/templates/id.html')
            if not machines:
                machines = utils.getBUmachines(theid)
        if page == 'group_dashboard':
            t = loader.get_template('munkiversion/templates/id.html')
            if not machines:
                machine_group = get_object_or_404(MachineGroup, pk=theid)
                machines = Machine.objects.filter(machine_group=machine_group)
        # NOTE(review): if page is none of the three values above, t is never
        # bound and t.render below raises NameError -- confirm callers only
        # ever pass these three page names.
        if machines:
            # Count machines per distinct munki_version value.
            munki_info = machines.values('munki_version').annotate(count=Count('munki_version')).order_by()
        else:
            munki_info = []
        c = Context({
            'title': 'Munki Version',
            'data': munki_info,
            'theid': theid,
            'page': page
        })
        # 4 is presumably the widget's width/weight on the dashboard grid --
        # confirm against the plugin framework's contract.
        return t.render(c), 4
    def filter_machines(self, machines, data):
        # You will be passed a QuerySet of machines, you then need to perform some filtering based on the 'data' part of the url from the show_widget output. Just return your filtered list of machines and the page title.
        machines = machines.filter(munki_version__exact=data)
        title = 'Machines running version '+data+' of MSC'
        return machines, title
| {
"content_hash": "e24120845d5620d20d344c75fb399f9a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 270,
"avg_line_length": 42.79245283018868,
"alnum_prop": 0.6256613756613757,
"repo_name": "macjustice/sal",
"id": "65677d90312103adf292103d14bc4f4b1eec9cc9",
"size": "2268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/plugins/munkiversion/munkiversion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189639"
},
{
"name": "HTML",
"bytes": "88321"
},
{
"name": "JavaScript",
"bytes": "682521"
},
{
"name": "Makefile",
"bytes": "1143"
},
{
"name": "Nginx",
"bytes": "1955"
},
{
"name": "Python",
"bytes": "169952"
},
{
"name": "Shell",
"bytes": "1573"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.longrunning import operations_pb2
from google.cloud.talent_v4beta1.types import common, completion_service
from .transports.base import DEFAULT_CLIENT_INFO, CompletionTransport
from .transports.grpc import CompletionGrpcTransport
from .transports.grpc_asyncio import CompletionGrpcAsyncIOTransport
class CompletionClientMeta(type):
    """Metaclass for the Completion client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[CompletionTransport]]
    _transport_registry["grpc"] = CompletionGrpcTransport
    _transport_registry["grpc_asyncio"] = CompletionGrpcAsyncIOTransport

    def get_transport_class(
        cls,
        # Fix: the annotation was `str = None`; a None default makes the
        # parameter Optional (PEP 484 disallows this implicit form).
        label: Optional[str] = None,
    ) -> Type[CompletionTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class CompletionClient(metaclass=CompletionClientMeta):
"""A service handles auto completion."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        # None / empty string: nothing to convert.
        if not api_endpoint:
            return api_endpoint

        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        # NOTE(review): m is assumed non-None here; an endpoint beginning
        # with "." would make match() return None and raise AttributeError
        # below -- confirm inputs are always hostnames.
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain at all.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "jobs.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            CompletionClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            CompletionClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Alias kept for backward compatibility with older client surfaces.
    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> CompletionTransport:
        """Returns the transport used by the client instance.

        Returns:
            CompletionTransport: The transport used by the client
                instance.
        """
        return self._transport
@staticmethod
def company_path(
project: str,
tenant: str,
company: str,
) -> str:
"""Returns a fully-qualified company string."""
return "projects/{project}/tenants/{tenant}/companies/{company}".format(
project=project,
tenant=tenant,
company=company,
)
@staticmethod
def parse_company_path(path: str) -> Dict[str, str]:
"""Parses a company path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/companies/(?P<company>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        # Reject malformed environment settings early, with a clear error.
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[str, CompletionTransport, None] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the completion client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, CompletionTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Normalize client_options: accept a plain dict, default to empty options.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Resolve the API endpoint and the (optional) mTLS client certificate
    # source from the options and GOOGLE_API_USE_* environment variables.
    api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
        client_options
    )

    api_key_value = getattr(client_options, "api_key", None)
    if api_key_value and credentials:
        raise ValueError(
            "client_options.api_key and credentials are mutually exclusive"
        )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, CompletionTransport):
        # transport is a CompletionTransport instance.
        # A pre-built transport already carries its own credentials and
        # scopes, so supplying them here as well would be ambiguous.
        if credentials or client_options.credentials_file or api_key_value:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        import google.auth._default  # type: ignore

        # Exchange an API key for credentials when google-auth supports it.
        if api_key_value and hasattr(
            google.auth._default, "get_api_key_credentials"
        ):
            credentials = google.auth._default.get_api_key_credentials(
                api_key_value
            )

        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
            api_audience=client_options.api_audience,
        )
def complete_query(
    self,
    request: Union[completion_service.CompleteQueryRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> completion_service.CompleteQueryResponse:
    r"""Completes the specified prefix with keyword suggestions.

    Intended for use by a job search auto-complete search box.

    Args:
        request (Union[google.cloud.talent_v4beta1.types.CompleteQueryRequest, dict]):
            The request object. Auto-complete parameters.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.talent_v4beta1.types.CompleteQueryResponse:
            Response of auto-complete query.
    """
    # Coerce a dict (or any non-proto input) into the request type. When
    # the caller already passed a CompleteQueryRequest, no copy is made.
    if not isinstance(request, completion_service.CompleteQueryRequest):
        request = completion_service.CompleteQueryRequest(request)

    # Routing header so the backend can locate the parent resource.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )

    # The wrapped method carries the configured retry/timeout defaults
    # and friendly error handling.
    call = self._transport._wrapped_methods[self._transport.complete_query]

    return call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
def __enter__(self):
    # Enables use as a context manager; the matching __exit__ closes the
    # underlying transport.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    self.transport.close()
def get_operation(
    self,
    request: operations_pb2.GetOperationRequest = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
    r"""Gets the latest state of a long-running operation.

    Args:
        request (:class:`~.operations_pb2.GetOperationRequest`):
            The request object. Request message for
            `GetOperation` method.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.operations_pb2.Operation:
            An ``Operation`` object.
    """
    # operations_pb2 types are raw protobufs (not proto-plus wrapped), so
    # a dict must be expanded as keyword arguments.
    if isinstance(request, dict):
        request = operations_pb2.GetOperationRequest(**request)

    # Wrap the bare transport method with retry/timeout handling.
    wrapped_rpc = gapic_v1.method.wrap_method(
        self._transport.get_operation,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing header identifying the operation resource by name.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )

    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=tuple(metadata) + (routing_header,),
    )
try:
    # Report the installed google-cloud-talent version in the user-agent.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-talent",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Not installed as a distribution (e.g. running from source); fall
    # back to a ClientInfo without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("CompletionClient",)
| {
"content_hash": "7d47d17d6bdb837965a0e6dfd0e3752d",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 107,
"avg_line_length": 39.469565217391306,
"alnum_prop": 0.6152015862524786,
"repo_name": "googleapis/python-talent",
"id": "e47f291a7f340af83a2cf1ddd36461e739c51f83",
"size": "23295",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/talent_v4beta1/services/completion/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2538179"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from builtins import range
from six.moves import cPickle as pickle
import numpy as np
import os
from scipy.misc import imread
import platform
def load_pickle(f):
    """Unpickle the next object from the open binary file *f*.

    On Python 3 the payload is decoded with latin1, which maps bytes
    0-255 one-to-one so pickles written by Python 2 load cleanly.
    """
    ver = platform.python_version_tuple()
    if ver[0] == '3':
        return pickle.load(f, encoding='latin1')
    if ver[0] == '2':
        return pickle.load(f)
    raise ValueError("invalid python version: {}".format(ver))
def load_CIFAR_batch(filename):
    """Load a single batch of cifar.

    Args:
        filename: path to a pickled batch file whose dict has keys
            'data' (N x 3072 rows, channel-first 3x32x32 flattened) and
            'labels' (N integer labels).

    Returns:
        (X, Y): X is a float array of shape (N, 32, 32, 3) in HWC layout;
        Y is an int array of shape (N,).
    """
    with open(filename, 'rb') as f:
        datadict = load_pickle(f)
        X = datadict['data']
        Y = datadict['labels']
        # Infer the batch size with -1 instead of hard-coding 10000 so
        # partial or custom-sized batches also load correctly.
        X = X.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
        Y = np.array(Y)
        return X, Y
def load_CIFAR10(ROOT):
    """Load all of CIFAR-10: the five training batches plus the test batch
    found under directory ROOT.
    """
    # Load and stack the five training batches.
    batches = [
        load_CIFAR_batch(os.path.join(ROOT, 'data_batch_%d' % (idx,)))
        for idx in range(1, 6)
    ]
    Xtr = np.concatenate([images for images, _ in batches])
    Ytr = np.concatenate([labels for _, labels in batches])
    Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))
    return Xtr, Ytr, Xte, Yte
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000,
                     subtract_mean=True):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for classifiers: split into train/val/test, optionally mean-center,
    and transpose images to channel-first layout.
    """
    # Load the raw CIFAR-10 data from the fixed dataset location.
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Carve the validation rows out first, then trim the train/test subsets.
    val_idx = list(range(num_training, num_training + num_validation))
    X_val, y_val = X_train[val_idx], y_train[val_idx]
    train_idx = list(range(num_training))
    X_train, y_train = X_train[train_idx], y_train[train_idx]
    test_idx = list(range(num_test))
    X_test, y_test = X_test[test_idx], y_test[test_idx]

    # Normalize: subtract the mean training image from every split.
    if subtract_mean:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image

    # Channels-first (NCHW) layout.
    X_train = X_train.transpose(0, 3, 1, 2).copy()
    X_val = X_val.transpose(0, 3, 1, 2).copy()
    X_test = X_test.transpose(0, 3, 1, 2).copy()

    return {
        'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test,
    }
def load_tiny_imagenet(path, dtype=np.float32, subtract_mean=True):
    """
    Load TinyImageNet. Each of TinyImageNet-100-A, TinyImageNet-100-B, and
    TinyImageNet-200 have the same directory structure, so this can be used
    to load any of them.

    Inputs:
    - path: String giving path to the directory to load.
    - dtype: numpy datatype used to load the data.
    - subtract_mean: Whether to subtract the mean training image.

    Returns: A dictionary with the following entries:
    - class_names: A list where class_names[i] is a list of strings giving the
      WordNet names for class i in the loaded dataset.
    - X_train: (N_tr, 3, 64, 64) array of training images
    - y_train: (N_tr,) array of training labels
    - X_val: (N_val, 3, 64, 64) array of validation images
    - y_val: (N_val,) array of validation labels
    - X_test: (N_test, 3, 64, 64) array of testing images.
    - y_test: (N_test,) array of test labels; if test labels are not available
      (such as in student code) then y_test will be None.
    - mean_image: (3, 64, 64) array giving mean training image
    """
    # First load wnids
    with open(os.path.join(path, 'wnids.txt'), 'r') as f:
        wnids = [x.strip() for x in f]

    # Map wnids to integer labels
    wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}

    # Use words.txt to get names for each class
    with open(os.path.join(path, 'words.txt'), 'r') as f:
        wnid_to_words = dict(line.split('\t') for line in f)
        for wnid, words in wnid_to_words.items():
            wnid_to_words[wnid] = [w.strip() for w in words.split(',')]
    class_names = [wnid_to_words[wnid] for wnid in wnids]

    # Next load training data.
    X_train = []
    y_train = []
    for i, wnid in enumerate(wnids):
        if (i + 1) % 20 == 0:
            print('loading training data for synset %d / %d'
                  % (i + 1, len(wnids)))
        # To figure out the filenames we need to open the boxes file
        boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)
        with open(boxes_file, 'r') as f:
            filenames = [x.split('\t')[0] for x in f]
        num_images = len(filenames)

        X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)
        y_train_block = wnid_to_label[wnid] * \
            np.ones(num_images, dtype=np.int64)
        for j, img_file in enumerate(filenames):
            img_file = os.path.join(path, 'train', wnid, 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                # Grayscale file: add a trailing channel axis.
                img.shape = (64, 64, 1)
            X_train_block[j] = img.transpose(2, 0, 1)
        X_train.append(X_train_block)
        y_train.append(y_train_block)

    # We need to concatenate all training data
    X_train = np.concatenate(X_train, axis=0)
    y_train = np.concatenate(y_train, axis=0)

    # Next load validation data: filename -> wnid pairs listed in a single
    # annotations file.
    with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:
        img_files = []
        val_wnids = []
        for line in f:
            img_file, wnid = line.split('\t')[:2]
            img_files.append(img_file)
            val_wnids.append(wnid)
        num_val = len(img_files)
        y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])
        X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)
        for i, img_file in enumerate(img_files):
            img_file = os.path.join(path, 'val', 'images', img_file)
            img = imread(img_file)
            if img.ndim == 2:
                img.shape = (64, 64, 1)
            X_val[i] = img.transpose(2, 0, 1)

    # Next load test images
    # Students won't have test labels, so we need to iterate over files in the
    # images directory.
    img_files = os.listdir(os.path.join(path, 'test', 'images'))
    X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)
    for i, img_file in enumerate(img_files):
        img_file = os.path.join(path, 'test', 'images', img_file)
        img = imread(img_file)
        if img.ndim == 2:
            img.shape = (64, 64, 1)
        X_test[i] = img.transpose(2, 0, 1)

    # Test labels are only present in instructor copies of the dataset.
    y_test = None
    y_test_file = os.path.join(path, 'test', 'test_annotations.txt')
    if os.path.isfile(y_test_file):
        with open(y_test_file, 'r') as f:
            img_file_to_wnid = {}
            for line in f:
                line = line.split('\t')
                img_file_to_wnid[line[0]] = line[1]
        y_test = [wnid_to_label[img_file_to_wnid[img_file]]
                  for img_file in img_files]
        y_test = np.array(y_test)

    mean_image = X_train.mean(axis=0)
    if subtract_mean:
        X_train -= mean_image[None]
        X_val -= mean_image[None]
        X_test -= mean_image[None]

    # NOTE: the original dict literal listed 'class_names' twice; the
    # duplicate key has been removed (behavior is unchanged, since the
    # later duplicate simply overwrote the earlier identical value).
    return {
        'class_names': class_names,
        'X_train': X_train,
        'y_train': y_train,
        'X_val': X_val,
        'y_val': y_val,
        'X_test': X_test,
        'y_test': y_test,
        'mean_image': mean_image,
    }
def load_models(models_dir):
    """
    Load saved models from disk. This will attempt to unpickle all files in a
    directory; any files that give errors on unpickling (such as README.txt)
    will be skipped.

    Inputs:
    - models_dir: String giving the path to a directory containing model files.
      Each model file is a pickled dictionary with a 'model' field.

    Returns:
    A dictionary mapping model file names to models.
    """
    models = {}
    for name in os.listdir(models_dir):
        path = os.path.join(models_dir, name)
        try:
            with open(path, 'rb') as f:
                models[name] = load_pickle(f)['model']
        except pickle.UnpicklingError:
            # Not a pickled model (e.g. README.txt); skip it.
            continue
    return models
| {
"content_hash": "fececf64d9ea5ac7d05197fc473fb0b0",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 35.6025641025641,
"alnum_prop": 0.5852838794862562,
"repo_name": "miguelfrde/stanford-cs231n",
"id": "a0b6136de55ff318728ead6be81bcf715ad17f10",
"size": "8331",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "assignment2/cs231n/data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "24203940"
},
{
"name": "Python",
"bytes": "308430"
},
{
"name": "Shell",
"bytes": "4902"
}
],
"symlink_target": ""
} |
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.io.excel import ExcelFile
xlrd = pytest.importorskip("xlrd")
xlwt = pytest.importorskip("xlwt")
@pytest.fixture(autouse=True)
def skip_ods_files(read_ext):
    # Auto-applied to every test in this module: xlrd cannot read
    # OpenDocument spreadsheets, so skip when parametrized with .ods.
    if read_ext == ".ods":
        pytest.skip("Not valid for xlrd")
def test_read_xlrd_book(read_ext, frame):
    # read_excel should accept an already-open xlrd Book object, both
    # wrapped in an ExcelFile and passed directly.
    df = frame

    engine = "xlrd"
    sheet_name = "SheetA"

    with ensure_clean(read_ext) as pth:
        df.to_excel(pth, sheet_name)
        book = xlrd.open_workbook(pth)

        with ExcelFile(book, engine=engine) as xl:
            result = pd.read_excel(xl, sheet_name, index_col=0)
            tm.assert_frame_equal(df, result)

        # The Book can also be passed straight to read_excel.
        result = pd.read_excel(book, sheet_name=sheet_name, engine=engine, index_col=0)
        tm.assert_frame_equal(df, result)
# TODO: test for openpyxl as well
def test_excel_table_sheet_by_index(datapath, read_ext):
    # Requesting a nonexistent sheet name should surface xlrd's own error.
    path = datapath("io", "data", "test1{}".format(read_ext))
    with pd.ExcelFile(path) as excel:
        with pytest.raises(xlrd.XLRDError):
            pd.read_excel(excel, "asdf")
| {
"content_hash": "bcef9c596fc92cc3cd5ffcd9dc798225",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6585365853658537,
"repo_name": "toobaz/pandas",
"id": "c4d99c827318debc8199c44cb43db730637bf153",
"size": "1148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandas/tests/io/excel/test_xlrd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from django.template.loader import render_to_string
from django.utils.translation import (
pgettext_lazy, ugettext, ugettext_lazy as _)
import jinja2
import six
from olympia import amo
from olympia.amo.templatetags.jinja_helpers import urlparams
from olympia.amo.urlresolvers import reverse
@jinja2.contextfunction
def install_button(context, addon, version=None,
                   show_warning=True, src='', collection=None, size='',
                   detailed=False, impala=False, show_download_anyway=False):
    """Render the install/download button markup for ``addon``.

    If version isn't given, we use the latest version.
    """
    if not collection:
        collection = None
    request = context['request']
    app, lang = context['APP'], context['LANG']
    # Tracking source: explicit argument wins, then template context, then
    # the request query string.
    src = src or context.get('src') or request.GET.get('src', '')
    # The collection may arrive as an object with a uuid, a raw uuid/id
    # string, or through several request parameters; first truthy value wins.
    collection_uuid = (
        getattr(collection, 'uuid', None) or
        collection or
        context.get('collection') or
        request.GET.get('collection') or
        request.GET.get('collection_id') or
        request.GET.get('collection_uuid'))
    button = install_button_factory(
        addon, app, lang, version=version,
        show_warning=show_warning, src=src, collection=collection_uuid,
        size=size, detailed=detailed, impala=impala,
        show_download_anyway=show_download_anyway)
    installed = (request.user.is_authenticated and
                 addon.id in request.user.mobile_addons)
    context = {
        'button': button, 'addon': addon, 'version': button.version,
        'installed': installed
    }
    # The impala flag selects the redesigned ("impala") template variant.
    if impala:
        template = 'addons/impala/button.html'
    else:
        template = 'addons/button.html'
    return jinja2.Markup(render_to_string(template, context, request=request))
@jinja2.contextfunction
def big_install_button(context, addon, **kwargs):
    """Render the prominent install button, wrapped with status-flag classes."""
    # NOTE(review): local import kept as in the original — presumably it
    # avoids an import cycle at module load time; confirm before moving.
    from olympia.addons.templatetags.jinja_helpers import statusflags

    status_classes = jinja2.escape(statusflags(context, addon))
    rendered_button = install_button(
        context, addon, detailed=True, show_download_anyway=True,
        size='prominent', **kwargs)
    return jinja2.Markup(
        u'<div class="install-wrapper %s">%s</div>' % (status_classes,
                                                       rendered_button))
def install_button_factory(*args, **kwargs):
    """Build an InstallButton and specialize its class by addon state."""
    button = InstallButton(*args, **kwargs)
    # Order matters. We want to highlight unreviewed before featured. They
    # should be mutually exclusive, but you never know — the first matching
    # predicate wins.
    specializations = (
        ('is_persona', PersonaInstallButton),
        ('unreviewed', UnreviewedInstallButton),
        ('experimental', ExperimentalInstallButton),
        ('featured', FeaturedInstallButton),
    )
    for attr, subclass in specializations:
        if getattr(button, attr, False):
            button.__class__ = subclass
            break
    button.prepare()
    return button
class InstallButton(object):
    """Holds the state needed to render an add-on install/download button.

    Subclasses override the class attributes below to restyle the button
    for featured/unreviewed/experimental/persona add-ons.
    """

    # CSS classes for the download button and install wrapper, plus the
    # label shown for the button's state.
    button_class = ['download']
    install_class = []
    install_text = ''

    def __init__(self, addon, app, lang, version=None,
                 show_warning=True, src='', collection=None, size='',
                 detailed=False, impala=False, show_download_anyway=False):
        self.addon, self.app, self.lang = addon, app, lang
        # ``latest`` records that no explicit version was requested.
        self.latest = version is None
        self.version = version
        if not self.version:
            self.version = addon.current_version
        self.src = src
        self.collection = collection
        self.size = size
        self.detailed = detailed
        self.show_download_anyway = show_download_anyway
        self.impala = impala

        version_unreviewed = self.version and self.version.is_unreviewed
        self.experimental = addon.is_experimental
        self.unreviewed = addon.is_unreviewed() or version_unreviewed
        # Featured styling only applies to reviewed, non-experimental
        # add-ons.
        self.featured = (not self.unreviewed and
                         not self.experimental and
                         addon.is_featured(app, lang))
        self.is_persona = addon.type == amo.ADDON_PERSONA

        self.show_warning = show_warning and self.unreviewed

    def prepare(self):
        """Called after the class is set to manage additional properties."""
        # Get a copy for this instance so mutations below don't leak into
        # the class-level defaults.
        self.button_class = list(self.__class__.button_class)
        self.install_class = list(self.__class__.install_class)

        if self.size:
            self.button_class.append(self.size)

    def attrs(self):
        # Extra data-* attributes rendered on the install element.
        rv = {}
        addon = self.addon
        if addon.type == amo.ADDON_SEARCH:
            rv['data-search'] = 'true'
        if addon.type in amo.NO_COMPAT:
            rv['data-no-compat-necessary'] = 'true'
        return rv

    def links(self):
        """Return one Link per valid file of the selected version."""
        if not self.version:
            return []
        rv = []
        files = [f for f in self.version.all_files
                 if f.status in amo.VALID_FILE_STATUSES]
        for file in files:
            text, url, download_url, os = self.file_details(file)
            rv.append(Link(text, self.fix_link(url),
                           self.fix_link(download_url), os, file))
        return rv

    def file_details(self, file):
        """Return (text, url, download_url, os) for one file."""
        platform = file.platform
        # Fully public latest versions link to the stable "latest" XPI
        # URLs; everything else links to the specific file.
        if self.latest and (
                self.addon.status == file.status == amo.STATUS_PUBLIC):
            url = file.latest_xpi_url()
            download_url = file.latest_xpi_url(attachment=True)
        else:
            url = file.get_url_path(self.src)
            download_url = file.get_url_path(self.src, attachment=True)
        if platform == amo.PLATFORM_ALL.id:
            text, os = ugettext('Download Now'), None
        else:
            text, os = ugettext('Download'), amo.PLATFORMS[platform]
        return text, url, download_url, os

    def fix_link(self, url):
        # Propagate the tracking source and collection id as query params.
        if self.src:
            url = urlparams(url, src=self.src)
        if self.collection:
            url = urlparams(url, collection_id=self.collection)
        return url
class FeaturedInstallButton(InstallButton):
    # Styling for add-ons featured in the current app/locale.
    install_class = ['featuredaddon']
    install_text = _(u'Featured')
class UnreviewedInstallButton(InstallButton):
    """Button variant for add-ons that have not passed review."""
    install_class = ['unreviewed']
    install_text = pgettext_lazy('install_button', u'Not Reviewed')
    # Caution styling warns users about unreviewed content.
    button_class = ['download', 'caution']
class ExperimentalInstallButton(InstallButton):
    # Experimental add-ons get the caution styling.
    install_class = ['lite']
    button_class = ['caution']
    install_text = pgettext_lazy('install_button', u'Experimental')
class PersonaInstallButton(InstallButton):
    """Button variant for lightweight themes (personas)."""
    install_class = ['persona']

    def links(self):
        # Personas link to the Personas Plus add-on detail page rather
        # than a direct file download.
        return [
            Link(
                ugettext(u'Add to {0}').format(six.text_type(self.app.pretty)),
                reverse('addons.detail', args=[amo.PERSONAS_ADDON_ID])
            )
        ]

    def attrs(self):
        rv = super(PersonaInstallButton, self).attrs()
        # Theme JSON exposed as a data attribute — presumably consumed by
        # front-end theme-preview code; confirm against the templates.
        rv['data-browsertheme'] = self.addon.persona.json_data
        return rv
class Link(object):
    """Value object describing one download link for an install button."""

    def __init__(self, text, url, download_url=None, os=None, file=None):
        self.text = text
        self.url = url
        self.download_url = download_url
        self.os = os
        self.file = file
| {
"content_hash": "2a28d3f987fc7c88df939235616e8407",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 34.99,
"alnum_prop": 0.6176050300085739,
"repo_name": "aviarypl/mozilla-l10n-addons-server",
"id": "007016a603d7f0acb0ffebf9b163e1b4f21f4596",
"size": "6998",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/addons/buttons.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "809734"
},
{
"name": "Dockerfile",
"bytes": "2898"
},
{
"name": "HTML",
"bytes": "515798"
},
{
"name": "JavaScript",
"bytes": "1070508"
},
{
"name": "Makefile",
"bytes": "827"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "10596"
},
{
"name": "Python",
"bytes": "5462821"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "8821"
},
{
"name": "Smarty",
"bytes": "1388"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import sys, numpy as np
from numpy import complex128, zeros
def sf2f_rf(ff, eps, w2f, wab2sf):
    """Get a function F(w) from its spectral function A(w) in case of
    response-like function/spectral function.

    The docstring was previously placed after the import statement, where
    Python discards it; it is now a real docstring.

    Args:
        ff: frequencies (list or array) at which F(w) is computed.
        eps: Lorentzian broadening parameter.
        w2f: frequencies at which the spectral function is given.
        wab2sf: the collection of spectral functions, first axis indexed
            like w2f.

    Returns:
        Complex array of shape (len(ff),) + wab2sf.shape[1:].
    """
    from pyscf.nao.m_lorentzian import lorentzian

    f2f = zeros([len(ff)] + list(wab2sf.shape[1:]), dtype=complex128)
    for i, f in enumerate(ff):
        for w, fp in enumerate(w2f):
            f2f[i] += wab2sf[w] * lorentzian(f, fp, eps)
    return f2f
| {
"content_hash": "a36818edab9920003cd09204c52480f3",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 110,
"avg_line_length": 38.31578947368421,
"alnum_prop": 0.6964285714285714,
"repo_name": "gkc1000/pyscf",
"id": "9f8e3502cd12348e7bee7b712c234d3a99e9cb4d",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscf/nao/m_sf2f_rf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2749942"
},
{
"name": "C++",
"bytes": "20522"
},
{
"name": "CMake",
"bytes": "29300"
},
{
"name": "Common Lisp",
"bytes": "40269"
},
{
"name": "Cuda",
"bytes": "12405"
},
{
"name": "Fortran",
"bytes": "1104054"
},
{
"name": "Jupyter Notebook",
"bytes": "42844"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "10739278"
},
{
"name": "Shell",
"bytes": "5480"
},
{
"name": "VBA",
"bytes": "577"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.