text stringlengths 4 1.02M | meta dict |
|---|---|
import unittest
import sys
import os
import PRESUBMIT
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
from PRESUBMIT_test_mocks import MockInputApi, MockOutputApi, MockAffectedFile
class PresubmitTest(unittest.TestCase):
    """Unit tests for the _CheckForDoctypeHTML presubmit check."""

    def testCheckForDoctypeHTML(self):
        """This verifies that we correctly identify missing DOCTYPE html tags.
        """
        # file1: well-formed document with a leading doctype -- should pass.
        file1 = MockAffectedFile("some/dir/file1.html", [
            "<!DOCTYPE html>", "<html>", "<body>", "<p>Test</p>", "</body>",
            "</html>"
        ])
        # file2: doctype missing entirely -- expected to be flagged.
        file2 = MockAffectedFile(
            "some/dir2/file2.html",
            ["<html>", "<body>", "<p>Test</p>", "</body>", "</html>"])
        # file3: doctype match is case-insensitive and may follow a comment.
        file3 = MockAffectedFile("file3.html", [
            "<!--Some comment-->", "<!docTYPE htML>", "<html>", "<body>",
            "<p>Test</p>", "</body>", "</html>"
        ])
        # file4: doctype only after other markup -- expected to be flagged.
        file4 = MockAffectedFile("dir/file4.html",
                                 ["<script></script>", "<!DOCTYPE html>"])
        # file5: empty file -- expected to be flagged.
        file5 = MockAffectedFile("file5.html", [])
        # file6: non-.html extension -- presumably skipped by the check
        # (the assertions below confirm it is not flagged).
        file6 = MockAffectedFile(
            "file6.not_html",
            ["<html>", "<body>", "<p>Test</p>", "</body>", "</html>"])
        # file7: trailing whitespace inside the doctype is tolerated.
        file7 = MockAffectedFile("file7.html", [
            "<!DOCTYPE html >", "<html>", "<body>", "<p>Test</p>", "</body>",
            "</html>"
        ])
        # file8: extra tokens after "html" are not the html doctype -- flagged.
        file8 = MockAffectedFile("file8.html", [
            "<!DOCTYPE html FOOBAR>", "<html>", "<body>", "<p>Test</p>",
            "</body>", "</html>"
        ])
        # file9: not flagged -- presumably the "quirk-" name prefix exempts
        # the file; verify against the implementation in PRESUBMIT.
        file9 = MockAffectedFile(
            "some/dir/quirk-file9.html",
            ["<html>", "<body>", "<p>Test</p>", "</body>", "</html>"])
        # file10: modified (action="M") rather than added -- not flagged,
        # i.e. the check presumably applies to new files only.
        file10 = MockAffectedFile(
            "old/file10.html",
            ["<html>", "<body>", "<p>New content</p>", "</body>", "</html>"],
            ["<html>", "<body>", "<p>Old content</p>", "</body>", "</html>"],
            action="M")
        mock_input_api = MockInputApi()
        mock_input_api.files = [
            file1, file2, file3, file4, file5, file6, file7, file8, file9,
            file10
        ]
        messages = PRESUBMIT._CheckForDoctypeHTML(mock_input_api,
                                                  MockOutputApi())
        # Exactly file2, file4, file5 and file8 must produce errors, in file
        # order, and each message must quote the offending path.
        self.assertEqual(4, len(messages))
        for i, file in enumerate([file2, file4, file5, file8]):
            self.assertEqual("error", messages[i].type)
            self.assertIn("\"%s\"" % file.LocalPath(), messages[i].message)

    def testCheckForDoctypeHTMLExceptions(self):
        """This test makes sure that we don't raise <!DOCTYPE html> errors
        for WPT importer.
        """
        error_file = MockAffectedFile(
            "some/dir/doctype_error.html",
            ["<html>", "<body>", "<p>Test</p>", "</body>", "</html>"])
        mock_input_api = MockInputApi()
        mock_input_api.files = [error_file]
        # Changes authored by the WPT auto-roller are downgraded from
        # errors to warnings.
        mock_input_api.change.author_email = \
            "wpt-autoroller@chops-service-accounts.iam.gserviceaccount.com"
        messages = PRESUBMIT._CheckForDoctypeHTML(mock_input_api,
                                                  MockOutputApi())
        self.assertEqual(1, len(messages))
        self.assertEqual("warning", messages[0].type)
# Allow running this suite directly: `python PRESUBMIT_test.py`.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "ac705bd1183fe96e3875756773b37f16",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 39.57831325301205,
"alnum_prop": 0.5092846270928463,
"repo_name": "chromium/chromium",
"id": "a4c9603c8919e213c167edb1a0cf0e80e05b9e67",
"size": "3449",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/PRESUBMIT_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import logging
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from src.data_ops.wrapping import wrap
from src.architectures.nmp.message_passing.vertex_update import GRUUpdate
from src.admin.utils import memory_snapshot
#from src.misc.grad_mode import no_grad
def conv_and_pad(in_planes, out_planes, kernel_size=17, stride=1):
    """1-D convolution whose padding preserves the temporal length.

    Padding is ``(kernel_size - 1) // 2``, so for odd kernel sizes and
    stride 1 the output length equals the input length.  No bias term.
    """
    padding = (kernel_size - 1) // 2
    return nn.Conv1d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, bias=False)


class BasicBlock(nn.Module):
    """Residual block of two length-preserving 1-D convolutions:
    conv -> bn -> relu -> conv -> bn, plus an identity (or downsampled) skip.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        m = OrderedDict()
        # BUG FIX: `stride` was previously passed positionally and landed in
        # conv_and_pad's `kernel_size` slot (its signature is
        # (in, out, kernel_size=17, stride=1)), silently turning conv1 into
        # a kernel_size == stride convolution.  Pass it by keyword instead.
        m['conv1'] = conv_and_pad(inplanes, planes, stride=stride)
        m['bn1'] = nn.BatchNorm1d(planes)
        m['relu1'] = nn.ReLU(inplace=True)
        m['conv2'] = conv_and_pad(planes, planes)
        m['bn2'] = nn.BatchNorm1d(planes)
        self.group1 = nn.Sequential(m)
        self.relu = nn.Sequential(nn.ReLU(inplace=True))
        self.downsample = downsample

    def forward(self, x):
        # Use the downsampled input as the residual when shapes change.
        if self.downsample is not None:
            residual = self.downsample(x)
        else:
            residual = x
        out = self.group1(x) + residual
        out = self.relu(out)
        del residual
        return out
def squared_distance_matrix(x, y):
    """Pairwise squared Euclidean distances between two batched point sets.

    :param x: tensor of shape (bs, n, d)
    :param y: tensor of shape (bs, m, d)
    :returns: tensor ``dist`` of shape (bs, n, m) where
        ``dist[i, j, k] = || x[i, j] - y[i, k] || ** 2``
    """
    batch, n, d = x.size(0), x.size(1), x.size(2)
    m = y.size(1)
    assert y.size(0) == batch
    assert y.size(2) == d
    # Expand both point sets to (bs, n, m, d) and reduce over coordinates.
    left = x.unsqueeze(2).expand(batch, n, m, d)
    right = y.unsqueeze(1).expand(batch, n, m, d)
    return torch.pow(left - right, 2).sum(3)
class ConvolutionalNMPBlock(nn.Module):
    """One refinement step combining a residual 1-D convolution branch with
    a distance-weighted message-passing branch; the two are concatenated
    and fed to a GRU-style vertex update.
    """

    def __init__(self, dim):
        super().__init__()
        self.spatial_embedding = nn.Linear(dim, 3)
        self.conv1d = BasicBlock(dim, dim)
        self.message = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True)
        )
        self.update = GRUUpdate(2 * dim, dim)

    def forward(self, x, mask):
        # Convolution operates over the sequence axis, hence the transposes.
        conv_out = self.conv1d(x.transpose(1, 2)).transpose(1, 2)
        # Learned 3-D coordinates define a soft Gaussian-kernel adjacency.
        coords = self.spatial_embedding(x)
        adjacency = torch.exp(-squared_distance_matrix(coords, coords)) * mask
        nmp_out = torch.bmm(adjacency, self.message(x))
        combined = torch.cat([conv_out, nmp_out], -1)
        return self.update(x, combined)
class BasicNMPBlock(nn.Module):
    """Plain message-passing refinement step (no convolutional branch)."""

    def __init__(self, dim):
        super().__init__()
        self.spatial_embedding = nn.Linear(dim, 3)
        self.message = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(inplace=True)
        )
        self.update = GRUUpdate(dim, dim)

    def forward(self, x, mask):
        # Learned 3-D coordinates define a soft Gaussian-kernel adjacency.
        coords = self.spatial_embedding(x)
        adjacency = torch.exp(-squared_distance_matrix(coords, coords)) * mask
        aggregated = torch.bmm(adjacency, self.message(x))
        return self.update(x, aggregated)
class ConvolutionOnlyBlock(nn.Module):
    """Pure residual-convolution refinement step (no message passing)."""

    def __init__(self, dim):
        super().__init__()
        self.conv1d = BasicBlock(dim, dim)

    def forward(self, x, mask):
        # `mask` is accepted for interface parity with the NMP blocks but
        # is unused here.
        return self.conv1d(x.transpose(1, 2)).transpose(1, 2)
class GraphGen(nn.Module):
    """Maps per-node features to a soft adjacency matrix.

    A stack of ``iters`` refinement blocks (selected by ``block``) updates
    the node embeddings; the final spatial embedding places each node in
    3-D, and the adjacency is ``exp(-scale * squared distance)``, masked.
    """

    def __init__(self,
                 features=None,
                 hidden=None,
                 iters=None,
                 no_grad=False,
                 tied=False,
                 block=None,
                 **kwargs
                 ):
        super().__init__()
        self.no_grad = no_grad
        self.initial_embedding = nn.Linear(features, hidden)
        # Select the refinement block type by name.
        if block == 'cnmp':
            NMPBlock = ConvolutionalNMPBlock
        elif block == 'nmp':
            NMPBlock = BasicNMPBlock
        elif block == 'conv':
            NMPBlock = ConvolutionOnlyBlock
        else:
            raise ValueError
        self.final_spatial_embedding = nn.Linear(hidden, 3)
        if tied:
            # Weight tying: one shared block instance (whose spatial
            # embedding is the final one) is applied at every iteration.
            nmp_block = NMPBlock(hidden)
            nmp_block.spatial_embedding = self.final_spatial_embedding
            self.nmp_blocks = nn.ModuleList([nmp_block] * iters)
        else:
            self.nmp_blocks = nn.ModuleList([NMPBlock(hidden) for _ in range(iters)])
        #self.scale = nn.Parameter(torch.zeros(1))
        # NOTE(review): `scale` is wrapped as a plain tensor, not an
        # nn.Parameter (see the commented-out line above), so it is never
        # trained -- confirm this is intentional.
        self.scale = wrap(torch.zeros(1))

    def forward(self, x, mask=None, **kwargs):
        # Dispatch on the gradient-saving trick chosen at construction.
        if self.no_grad:
            A = self.forward_no_grad(x, mask, **kwargs)
        else:
            A = self.forward_with_grad(x, mask, **kwargs)
        return A

    def forward_with_grad(self, x, mask, **kwargs):
        # Standard path: run every block with gradients enabled.
        x = self.initial_embedding(x)
        for nmp in self.nmp_blocks:
            x = nmp(x, mask)
        s = self.final_spatial_embedding(x)
        # Gaussian kernel on pairwise squared distances, masked.
        A = torch.exp( - squared_distance_matrix(s,s) * torch.exp(self.scale)) * mask
        return A

    def forward_no_grad(self, x, mask, **kwargs):
        # Run a random-length prefix of the blocks without gradient
        # tracking, then one tracked block on top of it.
        # NOTE(review): this uses the pre-0.4 PyTorch
        # `Variable(..., volatile=True)` API, which is a no-op on modern
        # PyTorch (use torch.no_grad() there) -- confirm target version.
        # NOTE(review): when n_volatile_layers == 0 no block runs at all,
        # and otherwise the blocks after index n_volatile_layers are
        # skipped -- confirm this sampling scheme is intended.
        n_volatile_layers = np.random.randint(0, len(self.nmp_blocks))
        if n_volatile_layers > 0:
            x = Variable(x.data, volatile=True)
            x = self.initial_embedding(x)
            for i in range(n_volatile_layers):
                nmp = self.nmp_blocks[i]
                x = nmp(x, mask)
            # Re-attach to the autograd graph for the final tracked layer.
            x = Variable(x.data)
            x = self.nmp_blocks[n_volatile_layers](x, mask)
        else:
            x = self.initial_embedding(x)
        s = self.final_spatial_embedding(x)
        A = torch.exp( - squared_distance_matrix(s,s) * torch.exp(self.scale)) * mask
        return A
| {
"content_hash": "9962678aab2cd2e03bd8a284b8a43f1d",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 112,
"avg_line_length": 28,
"alnum_prop": 0.564795918367347,
"repo_name": "isaachenrion/jets",
"id": "a89b6ae54a533df8394565f0d775665baf9fe667",
"size": "5880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/proteins/models/graphgen/graphgen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11751"
},
{
"name": "Python",
"bytes": "258548"
},
{
"name": "Shell",
"bytes": "6358"
}
],
"symlink_target": ""
} |
from .logger import Logger
from .metrics_collector import MetricsCollector
from .null import Null
from .statsd import Statsd
__all__ = [Logger, MetricsCollector, Null, Statsd]
| {
"content_hash": "d398821a9c40cfd3a7d5b5273d9f2924",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 50,
"avg_line_length": 29.5,
"alnum_prop": 0.7853107344632768,
"repo_name": "wiki-ai/ores",
"id": "1ceb0a3db3e54293df5fdb35a759130382ab878c",
"size": "177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ores/metrics_collectors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "433"
},
{
"name": "Dockerfile",
"bytes": "481"
},
{
"name": "HTML",
"bytes": "9290"
},
{
"name": "JavaScript",
"bytes": "5003"
},
{
"name": "Jupyter Notebook",
"bytes": "44108"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "157474"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class EsimProfileList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version):
        """
        Initialize the EsimProfileList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileList
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileList
        """
        super(EsimProfileList, self).__init__(version)

        # Path Solution (no path parameters for the list resource)
        self._solution = {}
        self._uri = '/ESimProfiles'.format(**self._solution)

    def create(self, callback_url=values.unset, callback_method=values.unset,
               eid=values.unset):
        """
        Create the EsimProfileInstance

        :param unicode callback_url: The URL we should call using `callback_method` when the status of the eSIM Profile changes
        :param unicode callback_method: The HTTP method we should use to call callback_url
        :param unicode eid: Identifier of the eUICC that will claim the eSIM Profile

        :returns: The created EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        """
        data = values.of({'CallbackUrl': callback_url, 'CallbackMethod': callback_method, 'Eid': eid, })

        payload = self._version.create(method='POST', uri=self._uri, data=data, )

        return EsimProfileInstance(self._version, payload, )

    def stream(self, eid=values.unset, sim_sid=values.unset, status=values.unset,
               limit=None, page_size=None):
        """
        Streams EsimProfileInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param unicode eid: List the eSIM Profiles that have been associated with an EId
        :param unicode sim_sid: Find the eSIM Profile resource related to a Sim resource by providing the SIM SID
        :param EsimProfileInstance.Status status: List the eSIM Profiles that are in a given status
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.supersim.v1.esim_profile.EsimProfileInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        page = self.page(eid=eid, sim_sid=sim_sid, status=status, page_size=limits['page_size'], )

        return self._version.stream(page, limits['limit'])

    def list(self, eid=values.unset, sim_sid=values.unset, status=values.unset,
             limit=None, page_size=None):
        """
        Lists EsimProfileInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param unicode eid: List the eSIM Profiles that have been associated with an EId
        :param unicode sim_sid: Find the eSIM Profile resource related to a Sim resource by providing the SIM SID
        :param EsimProfileInstance.Status status: List the eSIM Profiles that are in a given status
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: list that will contain up to limit results
        :rtype: list[twilio.rest.supersim.v1.esim_profile.EsimProfileInstance]
        """
        return list(self.stream(eid=eid, sim_sid=sim_sid, status=status, limit=limit, page_size=page_size, ))

    def page(self, eid=values.unset, sim_sid=values.unset, status=values.unset,
             page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of EsimProfileInstance records from the API.
        Request is executed immediately

        :param unicode eid: List the eSIM Profiles that have been associated with an EId
        :param unicode sim_sid: Find the eSIM Profile resource related to a Sim resource by providing the SIM SID
        :param EsimProfileInstance.Status status: List the eSIM Profiles that are in a given status
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfilePage
        """
        data = values.of({
            'Eid': eid,
            'SimSid': sim_sid,
            'Status': status,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(method='GET', uri=self._uri, params=data, )

        return EsimProfilePage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of EsimProfileInstance records from the API.
        Request is executed immediately

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfilePage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return EsimProfilePage(self._version, response, self._solution)

    def get(self, sid):
        """
        Constructs a EsimProfileContext

        :param sid: The SID of the eSIM Profile resource to fetch

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        """
        return EsimProfileContext(self._version, sid=sid, )

    def __call__(self, sid):
        """
        Constructs a EsimProfileContext (alias for :meth:`get`)

        :param sid: The SID of the eSIM Profile resource to fetch

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        """
        return EsimProfileContext(self._version, sid=sid, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Supersim.V1.EsimProfileList>'
class EsimProfilePage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, response, solution):
        """
        Initialize the EsimProfilePage

        :param Response response: Response from the API

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfilePage
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfilePage
        """
        super(EsimProfilePage, self).__init__(version, response)

        # Path Solution carried over from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of EsimProfileInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        """
        return EsimProfileInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Supersim.V1.EsimProfilePage>'
class EsimProfileContext(InstanceContext):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, sid):
        """
        Initialize the EsimProfileContext

        :param Version version: Version that contains the resource
        :param sid: The SID of the eSIM Profile resource to fetch

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        """
        super(EsimProfileContext, self).__init__(version)

        # Path Solution: interpolate the resource SID into the request path.
        self._solution = {'sid': sid, }
        self._uri = '/ESimProfiles/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch the EsimProfileInstance

        :returns: The fetched EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )

        return EsimProfileInstance(self._version, payload, sid=self._solution['sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Supersim.V1.EsimProfileContext {}>'.format(context)
class EsimProfileInstance(InstanceResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    class Status(object):
        # Lifecycle states of an eSIM Profile as reported by the API.
        NEW = "new"
        RESERVING = "reserving"
        AVAILABLE = "available"
        DOWNLOADED = "downloaded"
        INSTALLED = "installed"
        FAILED = "failed"

    def __init__(self, version, payload, sid=None):
        """
        Initialize the EsimProfileInstance

        :returns: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        """
        super(EsimProfileInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'iccid': payload.get('iccid'),
            'sim_sid': payload.get('sim_sid'),
            'status': payload.get('status'),
            'eid': payload.get('eid'),
            'smdp_plus_address': payload.get('smdp_plus_address'),
            'error_code': payload.get('error_code'),
            'error_message': payload.get('error_message'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
        }

        # Context: created lazily by _proxy; fall back to the payload's
        # own SID when no explicit sid was given.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: EsimProfileContext for this EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileContext
        """
        if self._context is None:
            self._context = EsimProfileContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account to which the eSIM Profile resource belongs
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def iccid(self):
        """
        :returns: The ICCID associated with the Sim resource
        :rtype: unicode
        """
        return self._properties['iccid']

    @property
    def sim_sid(self):
        """
        :returns: The SID of the Sim resource that this eSIM Profile controls
        :rtype: unicode
        """
        return self._properties['sim_sid']

    @property
    def status(self):
        """
        :returns: The status of the eSIM Profile
        :rtype: EsimProfileInstance.Status
        """
        return self._properties['status']

    @property
    def eid(self):
        """
        :returns: Identifier of the eUICC that can claim the eSIM Profile
        :rtype: unicode
        """
        return self._properties['eid']

    @property
    def smdp_plus_address(self):
        """
        :returns: Address of the SM-DP+ server from which the Profile will be downloaded
        :rtype: unicode
        """
        return self._properties['smdp_plus_address']

    @property
    def error_code(self):
        """
        :returns: Code indicating the failure if the download of the SIM Profile failed and the eSIM Profile is in `failed` state
        :rtype: unicode
        """
        return self._properties['error_code']

    @property
    def error_message(self):
        """
        :returns: Error message describing the failure if the download of the SIM Profile failed and the eSIM Profile is in `failed` state
        :rtype: unicode
        """
        return self._properties['error_message']

    @property
    def date_created(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The ISO 8601 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def url(self):
        """
        :returns: The absolute URL of the eSIM Profile resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the EsimProfileInstance

        :returns: The fetched EsimProfileInstance
        :rtype: twilio.rest.supersim.v1.esim_profile.EsimProfileInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Supersim.V1.EsimProfileInstance {}>'.format(context)
| {
"content_hash": "709cc3cc31410367e6b6de4483140863",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 138,
"avg_line_length": 36.45813953488372,
"alnum_prop": 0.6270970211137334,
"repo_name": "twilio/twilio-python",
"id": "454b5506c73bdc7e3993aeb4cb70efad3b97c5f5",
"size": "15692",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/supersim/v1/esim_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
from google.cloud import notebooks_v1
async def sample_start_runtime():
    """Start a managed notebook runtime and print the operation's result."""
    # Create a client
    client = notebooks_v1.ManagedNotebookServiceAsyncClient()

    # Initialize request argument(s)
    request = notebooks_v1.StartRuntimeRequest(
        name="name_value",
    )

    # Make the request
    # BUG FIX: on the async client, start_runtime() is a coroutine and must
    # be awaited to obtain the long-running-operation handle; without the
    # await, `operation.result()` is an attribute access on a coroutine.
    operation = await client.start_runtime(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)

# [END notebooks_v1_generated_ManagedNotebookService_StartRuntime_async]
| {
"content_hash": "593d6d8bf507e2d2edc89fe523eb7f9f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 24.695652173913043,
"alnum_prop": 0.7147887323943662,
"repo_name": "googleapis/python-notebooks",
"id": "3c457bc93827901c7210b2ee732eed9e75187c24",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/notebooks_v1_generated_managed_notebook_service_start_runtime_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1752787"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
} |
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('plonetheme.clean_blog')
| {
"content_hash": "9d8614c1be40c1ea003099ab66d1f11d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 22.8,
"alnum_prop": 0.7543859649122807,
"repo_name": "vikas-parashar/plonetheme.clean_blog",
"id": "19198e19e91e56aeb9c0ba34cdc42c01210ca167",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plonetheme/clean_blog/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135405"
},
{
"name": "HTML",
"bytes": "4606"
},
{
"name": "JavaScript",
"bytes": "8049"
},
{
"name": "Python",
"bytes": "13734"
},
{
"name": "RobotFramework",
"bytes": "2015"
},
{
"name": "Shell",
"bytes": "500"
}
],
"symlink_target": ""
} |
def title_case(title, minor_words=''):
    """Title-case *title*, keeping words listed in *minor_words* lower-case.

    The first word is always capitalized; comparison against the minor-word
    list is case-insensitive.  (Thanks to 'soapie' from CodeWars.)
    """
    minor = minor_words.lower().split()
    words = title.capitalize().split()
    return ' '.join(w if w in minor else w.capitalize() for w in words)
| {
"content_hash": "5a6061c3f42c2ed39c8ca811879631ec",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 70,
"avg_line_length": 46.5,
"alnum_prop": 0.6164874551971327,
"repo_name": "the-zebulan/CodeWars",
"id": "130e9dc47d6320d399465200627c101a68c6decd",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_6/title_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required ``company`` foreign key to ``registrants``."""

    dependencies = [
        ('registration', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='registrants',
            name='company',
            # default=1 back-fills existing rows (Company pk 1);
            # preserve_default=False then drops the default from the field.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='registration.Company'),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "a68225e51e0ed65ab18461fedede14b2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 119,
"avg_line_length": 25.7,
"alnum_prop": 0.6361867704280155,
"repo_name": "asterix135/infonex_crm",
"id": "b280190e7fb69e9b38f6b003e4363762349f725d",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/migrations/0002_registrants_company.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "112273"
},
{
"name": "HTML",
"bytes": "272534"
},
{
"name": "JavaScript",
"bytes": "415610"
},
{
"name": "Python",
"bytes": "674129"
},
{
"name": "Shell",
"bytes": "1837"
}
],
"symlink_target": ""
} |
__all__ = ['getDpi', 'getContext', 'getPreset', 'getScreenContext']
import matplotlib as mpl
from math import sqrt
def getDpi():
    """Return points per inch as defined by TeX (72.27 pt = 1 in)."""
    return 72.27
# Assumed screen resolution in dots per inch.
_screenDpi = 96
# Default font size (pt) for on-screen figures.
_screenFontSize = 12
# Golden ratio -- the default width/height aspect ratio for figures.
_goldenRatio = (1 + sqrt(5)) / 2

# Named figure presets: (column width in pt, font size in pt, aspect ratio).
# An aspect ratio of None falls back to the golden ratio in _getParams.
_presets = {
    'revtex12-single': (468, 10.95, None),  # Single-column revtex; 12pt.
    'mnras': (240, 8, _goldenRatio),  # Double-column MNRAS; default font size.
    'mnras-2': (504, 8, _goldenRatio * 1.2),  # Single-column MNRAS; default font size.
    'thesis': (300, 8, _goldenRatio),
    'thesis-wide': (426, 8, _goldenRatio)
}
def getPreset(preset):
    """Return the raw ``(width_pt, font_size_pt, aspect_ratio)`` tuple for
    *preset*; raises ``KeyError`` for unknown preset names."""
    return _presets[preset]
def getContext(returnParams=False, *args, **kwargs):
    """Matplotlib rc settings for LaTeX-rendered print figures.

    Remaining arguments are forwarded to ``_getParams`` (``preset``,
    ``width``, ``fontSize``, ``aspectRatio``, ``height``).  Returns the rc
    dict itself when *returnParams* is true, otherwise an
    ``mpl.rc_context`` for use in a ``with`` block.
    """
    width, height, dpi, fontSize, tickFontSize = _getParams(*args, **kwargs)
    rc = {}
    # NOTE(review): 'text.latex.unicode' was deprecated and removed in
    # matplotlib 3.0; on modern matplotlib setting it raises a KeyError --
    # confirm which matplotlib version this targets.
    rc['text.latex.unicode'] = True
    rc['text.usetex'] = True
    rc['font.family'] = 'serif'
    rc['font.serif'] = ['Computer Modern']
    rc['font.size'] = fontSize
    rc['axes.labelsize'] = fontSize
    rc['legend.fontsize'] = fontSize
    # Tick labels are slightly smaller than the main font size.
    rc['xtick.labelsize'] = tickFontSize
    rc['ytick.labelsize'] = tickFontSize
    rc['axes.labelcolor'] = 'black'
    rc['xtick.color'] = 'black'
    rc['ytick.color'] = 'black'
    # Figure geometry (inches) and resolution.
    rc['figure.figsize'] = width, height
    rc['figure.dpi'] = dpi
    if returnParams:
        return rc
    else:
        return mpl.rc_context(rc)
def getScreenContext(pixelWidth=None, fontSize=None, aspectRatio=None, returnParams=False):
    """Matplotlib rc settings for quick on-screen figures (no LaTeX).

    :param pixelWidth: optional figure width in pixels; the height follows
        from *aspectRatio*.
    :param fontSize: font size in points (module default when None).
    :param aspectRatio: width/height ratio (golden ratio when None).
    :param returnParams: if true, return the params dict instead of an
        ``mpl.rc_context``.
    """
    aspectRatio = _goldenRatio if aspectRatio is None else aspectRatio
    fontSize = _screenFontSize if fontSize is None else fontSize
    params = {
        'figure.dpi': _screenDpi,
        'text.usetex': False,
        'mathtext.fontset': 'stixsans',
        'font.family': 'Bitstream Vera Sans',
        'font.size': fontSize,
        'axes.labelsize': fontSize,
        'legend.fontsize': fontSize,
        'xtick.labelsize': fontSize * 0.8,
        'ytick.labelsize': fontSize * 0.8,
    }
    if pixelWidth is not None:
        inchWidth = pixelWidth / _screenDpi
        inchHeight = (pixelWidth / aspectRatio) / _screenDpi
        params['figure.figsize'] = (inchWidth, inchHeight)
    return params if returnParams else mpl.rc_context(params)
def _getParams(preset=None, width=None, fontSize=None, aspectRatio=None, height=None):
    """Resolve figure geometry from a preset and/or explicit overrides.

    Returns ``(width_in, height_in, dpi, fontSize, tickFontSize)`` with the
    width/height converted from TeX points to inches.  A *width* of at most
    1 is interpreted as a fraction of the preset width.
    """
    if preset is not None:
        assert preset in _presets, \
            'Preset not found.'
        presetWidth, presetFontSize, presetAspect = _presets[preset]
    else:
        presetWidth = presetFontSize = presetAspect = None
    if width is None:
        width = presetWidth
    elif width <= 1:
        # Fractional widths scale the preset's column width.
        assert presetWidth is not None
        width *= presetWidth
    fontSize = presetFontSize if fontSize is None else fontSize
    aspectRatio = presetAspect if aspectRatio is None else aspectRatio
    assert width is not None and fontSize is not None, \
        'Column width or font size missing.'
    dpi = getDpi()  # One inch in points (according to TeX).
    if height is None:
        if aspectRatio is None:
            aspectRatio = _goldenRatio
        height = width / aspectRatio
    # Convert from points to inches; tick labels are 80% of the font size.
    return width / dpi, height / dpi, dpi, fontSize, fontSize * 0.8
| {
"content_hash": "3c2972bd396aec2e050381165b99bab2",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 91,
"avg_line_length": 29.97196261682243,
"alnum_prop": 0.6239476145930777,
"repo_name": "willvousden/mpltex",
"id": "73d32689a54f12bd0a342848696ad7d22e787b65",
"size": "3207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpltex/mpltex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4080"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy.fftpack import fft
import math
##################################################################################
##################################################################################
def fourierAn(y):
    """Compute magnitude, dB-magnitude and unwrapped-phase spectra of y.

    The spectrum is rearranged so the zero frequency sits at the centre.
    Returns the tuple (magnitude, magnitude_dB, phase).
    """
    # Fourier transform of the data.
    spectrum = fft(y)
    # Magnitude, normalised by the number of samples.
    magnitude = abs(spectrum) / (y.size)
    # Unwrapped phase.
    phase = np.unwrap(np.angle(spectrum))
    ###########################################
    # Reorder the spectrum for plotting: swap the halves around the
    # mid-point so zero frequency ends up in the centre.
    half = int(math.floor((spectrum.size + 1) / 2))
    magnitude = np.hstack((magnitude[half:], magnitude[:half]))
    phase = np.hstack((phase[half:], phase[:half]))
    # Replace exact zeros by machine epsilon before taking the log.
    magnitude[magnitude < np.finfo(float).eps] = np.finfo(float).eps
    magnitude_db = 20 * np.log10(magnitude)
    # Return the magnitude, the magnitude in decibels and the phase.
    return magnitude, magnitude_db, phase
| {
"content_hash": "a4cdea21370bccddb791e2c62ee3cf26",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 142,
"avg_line_length": 43.68,
"alnum_prop": 0.47802197802197804,
"repo_name": "miltonsarria/dsp-python",
"id": "9b8affaa04afb1aafaec4f50b513169782b95e4d",
"size": "1197",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dsp2018/labDSP_stochastic_sine/fourierFunc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28084"
},
{
"name": "C++",
"bytes": "2359"
},
{
"name": "Jupyter Notebook",
"bytes": "497554"
},
{
"name": "Python",
"bytes": "406827"
}
],
"symlink_target": ""
} |
'''Actions for moving things around based on their velocity and
acceleration.
The simplest usage:
sprite = cocos.sprite.Sprite('ship.png')
sprite.velocity = (100, 100)
sprite.do(Move())
This will move the sprite (100, 100) pixels per second indefinitely.
Typically the sprite would be controlled by the user, so something like::
keys = <standard pyglet keyboard state handler>
class MoveShip(Move):
def step(self, dt):
super(MoveShip, self).step(dt)
self.target.dr = (keys[key.RIGHT] - keys[key.LEFT]) * 360
rotation = math.pi * self.target.rotation / 180.0
rotation_x = math.cos(-rotation)
rotation_y = math.sin(-rotation)
if keys[key.UP]:
self.target.acceleration = (200 * rotation_x, 200 * rotation_y)
ship.do(MoveShip())
'''
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
__all__ = [
'Move', 'WrappedMove', 'BoundedMove', 'Driver',
]
import math
from .base_actions import Action
class Move(Action):
    """Integrate per-frame motion from attributes stored on the target.

    Linear motion uses::

        target.position = (x, y)
        target.velocity = (dx, dy)
        target.acceleration = (ddx, ddy)   # optional, defaults to (0, 0)
        target.gravity = 0                 # optional, added to ddy

    Rotation uses the optional ``target.dr`` (angular velocity) and
    ``target.ddr`` (angular acceleration).
    """
    def step(self, dt):
        """Advance velocity, position and rotation by `dt` seconds."""
        target = self.target
        vx, vy = target.velocity
        ax, ay = getattr(target, 'acceleration', (0, 0))
        gravity = getattr(target, 'gravity', 0)
        # Integrate acceleration first (gravity acts on the y axis only)...
        vx += ax * dt
        vy += (ay + gravity) * dt
        target.velocity = (vx, vy)
        # ...then integrate the *updated* velocity into the position.
        px, py = target.position
        target.position = (px + vx * dt, py + vy * dt)
        # Angular motion is optional; only write target.dr when it is in use.
        spin = getattr(target, 'dr', 0)
        spin_acc = getattr(target, 'ddr', 0)
        if spin or spin_acc:
            spin = target.dr = spin + spin_acc * dt
        if spin:
            target.rotation += spin * dt
class WrappedMove(Move):
    """Like `Move`, but wrap the position around the screen bounds.

    Wrapping occurs outside of ``0 < x < width`` and ``0 < y < height``,
    widened by half of the target's dimensions (center anchor assumed).
    """
    def init(self, width, height):
        """Init method.

        :Parameters:
            `width` : integer
                The width to wrap position at.
            `height` : integer
                The height to wrap position at.
        """
        self.width = width
        self.height = height
    def step(self, dt):
        super(WrappedMove, self).step(dt)
        px, py = self.target.position
        tw, th = self.target.width, self.target.height
        # XXX assumes center anchor
        if px > self.width + tw/2:
            px -= self.width + tw
        elif px < -tw/2:
            px += self.width + tw
        if py > self.height + th/2:
            py -= self.height + th
        elif py < -th/2:
            py += self.height + th
        self.target.position = (px, py)
class BoundedMove(Move):
    """Like `Move`, but clamp the position to the screen bounds.

    The position is held inside ``0 < x < width`` and ``0 < y < height``,
    shrunk by half of the target's dimensions (center anchor assumed).
    """
    def init(self, width, height):
        """Init method.

        :Parameters:
            `width` : integer
                The width to bound position at.
            `height` : integer
                The height to bound position at.
        """
        self.width = width
        self.height = height
    def step(self, dt):
        super(BoundedMove, self).step(dt)
        px, py = self.target.position
        tw, th = self.target.width, self.target.height
        # XXX assumes center anchor
        if px > self.width - tw/2:
            px = self.width - tw/2
        elif px < tw/2:
            px = tw/2
        if py > self.height - th/2:
            py = self.height - th/2
        elif py < th/2:
            py = th/2
        self.target.position = (px, py)
class Driver(Action):
    """Drive a `CocosNode` object around like a car in x, y according to
    a direction and speed.

    Example::

        # control the movement of the given sprite
        sprite.do(Driver())
        ...
        sprite.rotation = 45
        sprite.speed = 100
        ...

    The sprite MAY have these parameters (beyond the standard position
    and rotation):

        `speed` : float
            Speed to move at in pixels per second in the direction of
            the target's rotation.
        `acceleration` : float
            If specified will automatically be added to speed.
            Specified in pixels per second per second.
        `max_forward_speed` : float (default None)
            Limits to apply to speed when updating with acceleration.
        `max_reverse_speed` : float (default None)
            Limits to apply to speed when updating with acceleration.
    """
    def step(self, dt):
        """Integrate acceleration into speed (clamped to the optional
        limits) and advance the target along its rotation heading.
        """
        accel = getattr(self.target, 'acceleration', 0)
        speed = getattr(self.target, 'speed', 0)
        max_forward = getattr(self.target, 'max_forward_speed', None)
        max_reverse = getattr(self.target, 'max_reverse_speed', None)
        if accel:
            speed += dt * accel
        # Bug fix: clamp the freshly *updated* local speed. The previous
        # code compared the stale self.target.speed, so the stored speed
        # could overshoot max_forward/max_reverse by one frame's worth of
        # acceleration before being clamped.
        if max_forward is not None and speed > max_forward:
            speed = max_forward
        if max_reverse is not None and speed < max_reverse:
            speed = max_reverse
        # rotation 0 points "up" the screen, hence sin for x and cos for y.
        r = math.radians(self.target.rotation)
        s = dt * speed
        x, y = self.target.position
        self.target.position = (x + math.sin(r) * s, y + math.cos(r) * s)
        self.target.speed = speed
| {
"content_hash": "e2f780e3114b53db820c8ebab861383c",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 75,
"avg_line_length": 30.481081081081083,
"alnum_prop": 0.5687178577762014,
"repo_name": "shujunqiao/cocos2d-python",
"id": "77b9335642210eec48ce90e7efe3497d38f93002",
"size": "7472",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cocos/actions/move_actions.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "22381"
},
{
"name": "Python",
"bytes": "1271799"
},
{
"name": "Shell",
"bytes": "7097"
}
],
"symlink_target": ""
} |
from logging.config import dictConfig
from .logging_config import LOGGING
def setup_logger(config=None):
    """Apply a dictConfig logging configuration.

    Falls back to the package-level ``LOGGING`` dict when no explicit
    config is supplied.
    """
    dictConfig(LOGGING if config is None else config)
| {
"content_hash": "cc43e69a1806d8bf7476b97e143b353f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 37,
"avg_line_length": 19.88888888888889,
"alnum_prop": 0.7262569832402235,
"repo_name": "jheld/diycrate",
"id": "bad320f32beee03ea20fccd9abc468d826853f82",
"size": "179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diycrate/log_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Python",
"bytes": "123055"
}
],
"symlink_target": ""
} |
"""Export data from Nova database and import through Identity Service."""
import uuid
from keystone.common import logging
from keystone.contrib.ec2.backends import sql as ec2_sql
from keystone.identity.backends import sql as identity_sql
# Module-level logger shared by the import/create helpers below.
LOG = logging.getLogger(__name__)
def import_auth(data):
    """Import a Nova auth dump into the Identity Service SQL backends.

    ``data`` is the dict exported from the Nova database; its tenants,
    users, memberships, roles, role assignments and EC2 credentials are
    recreated (with freshly generated ids) through the identity and EC2
    SQL backends.
    """
    identity = identity_sql.Identity()
    # Tenants and users must exist before memberships/roles reference them.
    tenants = _create_tenants(identity, data['tenants'])
    users = _create_users(identity, data['users'])
    _create_memberships(identity, data['user_tenant_list'], users, tenants)
    roles = _create_roles(identity, data['roles'])
    _assign_roles(identity, data['role_user_tenant_list'], roles, users, tenants)
    _create_ec2_creds(ec2_sql.Ec2(), identity, data['ec2_credentials'], users)
def _generate_uuid():
return uuid.uuid4().hex
def _create_tenants(api, tenants):
    """Create each exported tenant; return an old-id -> new-id mapping."""
    mapping = {}
    for old in tenants:
        # The Nova tenant id becomes the new tenant's *name*.
        new = {
            'id': _generate_uuid(),
            'name': old['id'],
            'description': old['description'],
            'enabled': True,
        }
        mapping[old['id']] = new['id']
        LOG.debug('Create tenant %s' % new)
        api.create_tenant(new['id'], new)
    return mapping
def _create_users(api, users):
    """Create each exported user; return an old-id -> new-id mapping."""
    mapping = {}
    for old in users:
        # The Nova user id becomes the new user's *name*; the password
        # hash is carried over as-is.
        new = {
            'id': _generate_uuid(),
            'name': old['id'],
            'email': '',
            'password': old['password'],
            'enabled': True,
        }
        mapping[old['id']] = new['id']
        LOG.debug('Create user %s' % new)
        api.create_user(new['id'], new)
    return mapping
def _create_memberships(api, memberships, user_map, tenant_map):
    """Recreate user/tenant memberships, translated to the new ids."""
    for entry in memberships:
        uid = user_map[entry['user_id']]
        tid = tenant_map[entry['tenant_id']]
        LOG.debug('Add user %s to tenant %s' % (uid, tid))
        api.add_user_to_tenant(tid, uid)
def _create_roles(api, roles):
    """Create any roles not already present; return a name -> id mapping.

    Roles that already exist in the backend are kept and simply included
    in the returned mapping.
    """
    existing = dict((r['name'], r['id']) for r in api.list_roles())
    for name in roles:
        if name in existing:
            LOG.debug('Ignoring existing role %s' % name)
            continue
        role = {
            'id': _generate_uuid(),
            'name': name,
        }
        existing[name] = role['id']
        LOG.debug('Create role %s' % role)
        api.create_role(role['id'], role)
    return existing
def _assign_roles(api, assignments, role_map, user_map, tenant_map):
    """Grant each exported (role, user, tenant) triple under the new ids."""
    for entry in assignments:
        rid = role_map[entry['role']]
        uid = user_map[entry['user_id']]
        tid = tenant_map[entry['tenant_id']]
        LOG.debug('Assign role %s to user %s on tenant %s' %
                  (rid, uid, tid))
        api.add_role_to_user_and_tenant(uid, tid, rid)
def _create_ec2_creds(ec2_api, identity_api, ec2_creds, user_map):
    """Recreate EC2 credentials, scoping one per tenant of each user.

    The access key is prefixed with the tenant id ("tenant:access") so a
    single exported credential yields a distinct credential per tenant.
    """
    for cred in ec2_creds:
        uid = user_map[cred['user_id']]
        for tid in identity_api.get_tenants_for_user(uid):
            scoped = {
                'access': '%s:%s' % (tid, cred['access_key']),
                'secret': cred['secret_key'],
                'user_id': uid,
                'tenant_id': tid,
            }
            LOG.debug('Creating ec2 cred for user %s and tenant %s' %
                      (uid, tid))
            ec2_api.create_credential(None, scoped)
| {
"content_hash": "c428fbda9c316e14bf4878ee1800182a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 73,
"avg_line_length": 34.018348623853214,
"alnum_prop": 0.5744336569579288,
"repo_name": "cbrucks/keystone_ldap",
"id": "01b14d9847fd13269019e388fd5f17cb817523eb",
"size": "4332",
"binary": false,
"copies": "1",
"ref": "refs/heads/essex-eol-ldap",
"path": "keystone/common/sql/nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "591270"
},
{
"name": "Shell",
"bytes": "5273"
}
],
"symlink_target": ""
} |
"""
Concept from:
[1] https://github.com/venidera/otsdb_client/blob/master/otsdb_client/client.py
2017.02, Bikash Agrawal, DNVGL
Insert spark dataframe to opentsdb
Example usage
--------------
>>> from opentsdb import opentsdb
>>> oc = opentsdb()
>>> oc.ts_insert(df)
Note: it works only with spark dataframe.
"""
import requests as gr
import time
import itertools
from datetime import datetime
import socket
import urllib2
import httplib
import json
import datetime as dt
import random
from dateutil import rrule
from collections import OrderedDict
from multiprocessing import Process, Queue, Pool
import time
import logging
import logging.config
import re
from json import dumps as tdumps, loads
import sys, os
# Module-wide logging setup: default everything to WARNING and expose a
# dedicated logger for this client.
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('opentsdb.process')
class opentsdb(object):
    """Minimal HTTP client for an OpenTSDB server.

    Wraps the /api/put, /api/query, /api/query/exp, /api/suggest,
    /api/aggregators, /api/version, /api/stats and /api/config/filters
    endpoints using the ``requests`` library (imported as ``gr``).

    NOTE(review): the module imports urllib2/httplib and this class uses
    dict.iteritems(), so this code targets Python 2.
    """
    def __init__(self, hostname='localhost', port=9998):
        self.port = port
        self.hostname = hostname
        self.url = 'http://%s:%d' % (hostname, port)
        self.headers = {'content-type': "application/json"}
        # Cache the server-side aggregator names for argument validation.
        self.aggregators = self.get_aggregators()
        # Per-kind counters used by gen_id() to mint short ids ("f1", "m1", ...).
        self.ids = {"filter": {}, "metric": {}}
    ## test opentsdb connection
    def ping(self, host, port):
        import socket
        try:
            socket.socket().connect((host, port))
            print('Ping in '+host+':'+str(port) + " OpenTSDB Server: Ok")
            return True
        except socket.error as err:
            # Connection refused means the server is down; anything else is
            # reported as a generic connectivity failure.
            if err.errno == socket.errno.ECONNREFUSED:
                raise Exception('Can\'t connect to OpenTSDB Server')
            raise Exception('Fail to test OpenTSDB connection status')
    def get_endpoint(self, key=""):
        """Map a short key to its full /api endpoint path."""
        endpoint = '/api' + {
            'filters': '/config/filters',
            'query_exp': '/query/exp',
            'aggr': '/aggregators',
            'suggest': '/suggest',
            'version': '/version',
            'put': '/put?details',
            'query': '/query',
            'stats': '/stats',
        }.get(str(key))
        # NOTE(review): `is not` performs an identity check against a str
        # literal, so this assert never fires in practice; an unknown key
        # makes the '+' above raise TypeError (dict.get returns None) before
        # reaching it. `!=` was probably intended.
        assert endpoint is not '/api', \
            "Please provide a valid endpoint."
        return endpoint
    def _get(self, endpoint="", params=dict()):
        """Issue a GET against the named endpoint; return the raw response."""
        r = gr.get(self.url + self.get_endpoint(endpoint),
                   params=params)
        #gr.map([r],exception_handler=exception_handler)
        return r
    def _post(self, endpoint="", data=dict()):
        """Issue a POST with a JSON-serialized body; return the raw response."""
        assert isinstance(data, dict), 'Field <data> must be a dict.'
        r = gr.post(self.url + self.get_endpoint(endpoint),
                    data=self.dumps(data), headers=self.headers)
        #gr.map([r],exception_handler=exception_handler)
        return r
    def process_response(self, response):
        """Decode a JSON response: False on HTTP error, None on empty body."""
        status = response.status_code
        if not (200 <= status < 300):
            logger.info("HTTP error code = %d" % status)
            return False
        data = loads(response.text)
        return data if data else None
    def filters(self):
        """ Lists the various filters loaded by the TSD """
        resp = self._get(endpoint="filters")
        return self.process_response(resp)
    def statistics(self):
        """Get info about what metrics are registered and with what stats."""
        resp = self._get(endpoint="stats")
        return self.process_response(resp)
    def get_aggregators(self):
        """Used to get the list of default aggregation functions. """
        resp = self._get(endpoint="aggr")
        return self.process_response(resp)
    def version(self):
        """Used to check OpenTSDB version. """
        resp = self._get(endpoint="version")
        return self.process_response(resp)
    def suggest(self, type='metrics', q='', max=9999):
        """ Matches the string in the query on the first chars of the stored data.
        Parameters
        ----------
        'type' : string (default='metrics')
            The type of data. Must be one of the following: metrics, tagk or tagv.
        'q' : string, optional (default='')
            A string to match on for the given type.
        'max' : int, optional (default=9999)
            The maximum number of suggested results. Must be greater than 0.
        """
        resp = self._get(endpoint="suggest", params={'type': type, 'q': q, 'max': max})
        return self.process_response(resp)
    def put(self, metric=None, timestamps=[], values=[], tags=dict(),
            details=True, verbose=True, ptcl=20, att=5):
        """ Put time serie points into OpenTSDB over HTTP.
        Parameters
        ----------
        'metric' : string, required (default=None)
            The name of the metric you are storing.
        'timestamps' : int, required (default=None) ** [generated over mktime]
            A Unix epoch style timestamp in seconds or milliseconds.
        'values' : array, required (default=[])
            The values to record.
        'tags' : map, required (default=dict())
            A map of tag name/tag value pairs.
        'details' : boolean, optional (default=True)
            Whether or not to return detailed information
        'verbose' : boolean, optional (default=False)
            Enable verbose output.
        'ptcl' : int, required (default=10)
            Number of points sent per http request
        'att' : int, required (default=5)
            Number of HTTP request attempts
        """
        assert isinstance(metric, str), 'Field <metric> must be a string.'
        assert isinstance(values, list), 'Field <values> must be a list.'
        assert isinstance(timestamps, list), 'Field <timestamps> must be a list.'
        if len(timestamps) > 0:
            assert len(timestamps) == len(values), \
                'Field <timestamps> dont fit field <values>.'
            assert all(isinstance(x, (int, datetime)) for x in timestamps), \
                'Field <timestamps> must be integer or datetime'
        # Batch the points: one POST per <ptcl> points.
        pts = list()
        ptl = []
        ptc = 0
        for n, v in enumerate(values):
            v = float(v)
            if not timestamps:
                # No timestamps supplied: stamp with current time in ms.
                current_milli_time = lambda: int(round(time.time() * 1000))
                nts = current_milli_time()
            else:
                nts = timestamps[n]
                # Normalize datetime -> epoch seconds, everything else -> int.
                if isinstance(nts, datetime):
                    nts = int(time.mktime(nts.timetuple()))
                elif not isinstance(nts, int):
                    nts = int(nts)
            u = {'timestamp': nts, 'metric': metric, 'value': v, 'tags': tags}
            ptl.append(u)
            ptc += 1
            if ptc == ptcl:
                # Batch full: flush it in one POST.
                ptc = 0
                pts.append(gr.post(self.url + self.get_endpoint("put") +
                                   '?summary=true&details=true', data=self.dumps(ptl)))
                ptl = list()
        if ptl:
            # Flush the final partial batch.
            pts.append(gr.post(self.url + self.get_endpoint("put") +
                               '?summary=true&details=true', data=self.dumps(ptl)))
        attempts = 0
        fails = 1
        # Retry accounting loop (up to <att> passes over the responses).
        # NOTE(review): with `import requests as gr`, elements of `pts` are
        # requests.Response objects, which expose .status_code directly --
        # `x.response.status_code` below looks like a grequests leftover and
        # would raise AttributeError. Also nothing here re-sends the failed
        # batches; the loop only re-filters the same response list.
        while attempts < att and fails > 0:
            #gr.map(pts,exception_handler=exception_handler)
            if verbose:
                print('Attempt %d: Request submitted with HTTP status codes %s' \
                    % (attempts + 1, str([x.response.status_code for x in pts])))
            pts = [x for x in pts if not 200 <= x.response.status_code <= 300]
            attempts += 1
            fails = len([x for x in pts])
        if verbose:
            total = len(values)
            print("%d of %d (%.2f%%) requests were successfully sent" \
                % (total - fails, total, 100 * round(float((total - fails))/total, 2)))
        return {
            'points': len(values),
            'success': len(values) - fails,
            'failed': fails
        }
    def query(self, queries=[], start='1h-ago', end='now', show_summary=False,
              show_json=False, nots=False, tsd=True, group=False):
        """ Enables extracting data from the storage system
        Parameters
        ----------
        'metric' : string, required (default=None)
            The name of a metric stored in the system.
        'aggr' : string, required (default=sum)
            The name of an aggregation function to use.
        'tags' : map, required (default=dict())
            A map of tag name/tag value pairs.
        'start' : string, required (default=1h-ago)
            The start time for the query.
        'end' : string, optional (default=current time)
            An end time for the query.
        'show_summary' : boolean, optional (default=False)
            Whether or not to show a summary of timings surrounding the query.
        'show_json': boolean, optional (default=False)
            If true, returns the response in the JSON format
        'nots': boolean, optional (default=False)
            Hides timestamp results
        'tsd': boolean, optional (default=True)
            Set timestamp as datetime object instead of an integer
        'group': boolean, optional (default=False)
            Returns the points of the time series grouped (i.e. metric + tags) in one list
        """
        assert isinstance(queries, list), 'Field <queries> must be a list.'
        assert len(queries) > 0, 'Field <queries> must have at least one query'
        # Validate every sub-query before sending anything to the server.
        for q in queries:
            assert isinstance(q, dict), 'Field <element> must be a dict.'
            assert all(i in q.keys() for i in ['m', 'aggr', 'tags']), \
                'Not all required elements were informed.'
            assert isinstance(q['m'], str), \
                'Field <metric> must be a string.'
            assert q['aggr'] in self.aggregators, \
                'The aggregator is not valid.'
            assert isinstance(q['tags'], dict), \
                'Field <tags> must be a dict'
            if 'rate' in q.keys():
                assert isinstance(q['rate'], bool), \
                    'Field <rate> must be True or False'
        data = {"start": start, "end": end, "queries":
                [{
                    "aggregator": q['aggr'],
                    "metric": q['m'],
                    "tags": q['tags'],
                    "rate": q['rate'] if 'rate' in q.keys() else False,
                    'show_summary': show_summary
                } for q in queries]
        }
        resp = self._post(endpoint="query", data=data)
        if 200 <= resp.status_code <= 300:
            result = None
            if show_json:
                # Raw response
                result = resp.text
            else:
                data = loads(resp.text)
                if group:
                    # Merge the dps of every returned series into one dict,
                    # summing values that share a timestamp.
                    dpss = dict()
                    for x in data:
                        if 'metric' in x.keys():
                            for k,v in x['dps'].items():
                                if k in dpss.keys():
                                    dpss[k] += v
                                else:
                                    dpss[k] = v
                    points = sorted(dpss.items())
                    if not nots:
                        result = {'results':{'timestamps':[],'values':[]}}
                        if tsd:
                            result['results']['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
                        else:
                            result['results']['timestamps'] = [x[0] for x in points]
                    else:
                        result = {'results':{'values':[]}}
                    result['results']['values'] = [float(x[1]) for x in points]
                else:
                    # One result entry per returned series (metric + tags).
                    result = {'results':[]}
                    for x in data:
                        if 'metric' in x.keys():
                            dps = x['dps']
                            points = sorted(dps.items())
                            resd = {'metric':x['metric'],'tags':x['tags'],'timestamps':[],'values':[float(y[1]) for y in points]}
                            if not nots:
                                if tsd:
                                    resd['timestamps'] = [datetime.fromtimestamp(float(x[0])) for x in points]
                                else:
                                    resd['timestamps'] = [x[0] for x in points]
                            else:
                                del resd['timestamps']
                            result['results'].append(resd)
                if show_summary:
                    # The server appends a statsSummary entry when requested.
                    result['summary'] = data[-1]['statsSummary']
            return result
        else:
            print('No results found')
            return []
    def gen_id(self, tid="", desc=""):
        """Return a stable short id ("f<N>"/"m<N>") for a filter/metric desc."""
        assert tid in self.ids.keys(), "Field <tip> is not valid."
        assert desc, "Field <desc> is not valid."
        if desc not in self.ids[tid].keys():
            # First time we see this description: assign the next counter.
            if len(self.ids[tid]) == 0:
                self.ids[tid][desc] = 1
            else:
                self.ids[tid][desc] = max(self.ids[tid].values()) + 1
        return "%s%d" % (tid[:1], self.ids[tid][desc])
    def build_policy(self, vpol=None):
        """Translate a fill value into an OpenTSDB fill-policy dict."""
        assert vpol != None, \
            'Field <vpol> must have a value.'
        if vpol == 0:
            return {'policy': 'zero'}
        elif any(isinstance(vpol, i) for i in [int, float]):
            return {'policy': 'scalar', 'value': vpol}
        elif vpol in ['nan', 'null']:
            return {'policy': vpol}
        else:
            assert False, 'Field <vpol> is not valid.'
    def build_downsampler(self, aggr='max', interval=None, vpol=None):
        """Build the downsampler spec used by /api/query/exp."""
        assert interval != None, \
            'Field <interval> is not valid.'
        assert aggr in self.aggregators, \
            'The aggregator is not valid. Check OTSDB docs for more details.'
        ret = {'interval': interval, 'aggregator': aggr}
        if vpol:
            ret['fillPolicy'] = self.build_policy(vpol)
        return ret
    def build_filter(self, tags={}, group=True):
        """Build a literal_or tag-filter spec with a generated short id."""
        assert len(tags) > 0 and isinstance(tags, dict), \
            'Field <tags> is not valid.'
        obj = {"id" : self.gen_id("filter", self.dumps(tags)), "tags" : []}
        for t in tags:
            obj["tags"].append(
                {
                    "type": "literal_or",
                    "tagk": t,
                    "filter": tags[t],
                    "groupBy": group
                }
            )
        return obj
    def query_expressions(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
                          metrics=[], exprs=[], dsampler=None, forceAggregate=False):
        """ Allows for querying data using expressions.
        Parameters
        ----------
        'aggr' : string, required (default=sum)
            The name of an aggregation function to use.
        'start' : string, required (default=1h-ago)
            The start time for the query.
        'end' : string, optional (default=current time)
            An end time for the query.
        'vpol': [int, float, str], required (default=0)
            The value used to replace "missing" values, i.e. when a data point was
            expected but couldn't be found in storage.
        'metrics': array of tuples, required (default=[])
            Determines the pairs (metric, tags) in the expressions.
        'exprs': array of tuples, required (default=[])
            A list with one or more pairs (id, expr) of expressions.
        'dsampler': tuple of three elements, optional (default=None)
            Reduces the number of data points returned, given an interval
        'forceAggregate': boolean, optional (default=false)
            Forces the aggregation of metrics with the same name
        """
        assert aggr in self.aggregators, \
            'The aggregator is not valid. Check OTSDB docs for more details.'
        assert any(isinstance(vpol, i) for i in [int, float]) or \
            (isinstance(vpol, str) and vpol in ['null', 'nan']), \
            'Field <vpol> is not valid.'
        assert isinstance(metrics, list), 'Field <metrics> must be a list.'
        assert len(metrics) > 0, 'Field <metrics> must have at least one element'
        for m in metrics:
            assert isinstance(m, dict), 'Field <element> must be a dict.'
            assert all(i in m.keys() for i in ['m', 'tags']), \
                'Not all required element keys were informed.'
            assert isinstance(m['m'], str), \
                'Field <metric> must be a string.'
            assert isinstance(m['tags'], dict), \
                'Field <tags> must be a dict'
        assert isinstance(exprs, list), 'Field <exprs> must be a list.'
        assert len(exprs) > 0, 'Field <exprs> must have at least one metric'
        for e in exprs:
            assert len(e) == 2, \
                'Tuple must have the (id, expr) format.'
            assert isinstance(e[0], str), \
                'Field <id> must be a string.'
            assert isinstance(e[1], str), \
                'Field <expr> must be a string.'
        if dsampler:
            assert 2 <= len(dsampler) <= 3, \
                'Field <dsampler> must be composed by (interval, aggr) ' \
                'or (interval, aggr, vpol).'
            assert isinstance(dsampler[0], str), \
                'Field <interval> must be a string.'
            assert dsampler[1] in self.aggregators, \
                'Field <aggr> is not a valid aggregator.'
        # Setting <time> definitions
        time = {
            'start': start,
            'aggregator': aggr,
            'end': end
        }
        if dsampler:
            time['downsampler'] = self.build_downsampler(
                interval=dsampler[0], aggr=dsampler[1],
                vpol=dsampler[2] if len(dsampler) == 3 else None)
        # Setting <filters> definitions
        filters = {self.dumps(i): self.build_filter(tags=i['tags']) for i in metrics}
        # Setting <metric> definitions
        q_metrics = []
        for m in metrics:
            obj = {
                'id': self.gen_id(tid="metric", desc=self.dumps(m)),
                'filter': filters[self.dumps(m)]['id'],
                'metric': m['m']
            }
            if vpol is not None:
                obj['fillPolicy'] = self.build_policy(vpol)
            q_metrics.append(obj)
        filters = filters.values()
        # Deduplicate the filters while preserving their order.
        # NOTE(review): Python 2 only -- relies on dict.values() being a
        # sliceable list (Python 3 returns a non-sliceable view).
        filters = [i for n, i in enumerate(filters) if i not in filters[n + 1:]]
        assert isinstance(filters, list) and len(filters) > 0, \
            'Object filter is not valid.'
        # Setting <expression> definitions
        q_exprs = []
        for e in exprs:
            m_id = e[1]
            # Rewrite serialized metric descriptors in the expression to
            # their short "m<N>" ids.  NOTE(review): iteritems() is Python 2
            # only.
            for i, j in self.ids["metric"].iteritems():
                m_id = m_id.replace(i, "m%d" % j)
            obj = {
                'id': e[0],
                'expr': m_id
            }
            q_exprs.append(obj)
        outputs = [
            {
                'id': e[0],
                'alias': 'Expression %s' % e[0]
            } for e in exprs]
        # Building the data query
        data = {
            'time': time,
            'metrics': q_metrics,
            'filters': filters,
            'expressions': q_exprs,
            'outputs': outputs
        }
        # Sending request to OTSDB and capturing HTTP response
        resp = self._post(endpoint="query_exp", data=data)
        res = self.process_response(resp)
        if forceAggregate == True:
            for i in range(len(res["outputs"])):
                # Forcing the aggregation
                dps = res["outputs"][i]["dps"]
                new_dps = []
                for dp in dps:
                    if len(dp) > 2:
                        new_dps.append([dp[0], sum(dp[1:])])
                res["outputs"][i]["dps"] = new_dps
                res["outputs"][i]["dpsMeta"]["series"] = 1
                res["outputs"][i]["meta"] = []
        return res
    def query_summing(self, aggr='sum', start='1d-ago', end='now', vpol="nan",
                      metrics=[], dsampler=None):
        """ Sum all required metrics using query with expressions """
        assert isinstance(metrics, list), 'Field <metrics> must be a list.'
        assert len(metrics) > 0, 'Field <metrics> must have at least one element'
        for m in metrics:
            assert isinstance(m, dict), 'Field <element> must be a dict.'
            assert all(i in m.keys() for i in ['m', 'tags']), \
                'Not all required element keys were informed.'
            assert isinstance(m['m'], str), \
                'Field <metric> must be a string.'
            assert isinstance(m['tags'], dict), \
                'Field <tags> must be a dict'
        # Build "<m1> + <m2> + ..." from the serialized metric descriptors;
        # query_expressions() rewrites them to short "m<N>" ids.
        expr = ""
        for m in metrics:
            expr += "%s + " % self.dumps(m)
        expr = expr[:-3]
        expressions = [("sum", expr)]
        return self.query_expressions(aggr='sum', start=start, end=end, vpol=vpol,
                                      metrics=metrics, exprs=expressions, dsampler=dsampler, forceAggregate=True)
    def dumps(self, x):
        """JSON-encode x, stringifying anything json cannot serialize."""
        return tdumps(x, default=str)
if __name__ == "__main__":
    # Quick smoke test: confirm a local OpenTSDB instance is reachable.
    client = opentsdb()
    client.ping("localhost", 9998)
| {
"content_hash": "0256b413a438e2fa8a4d4683ae441571",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 129,
"avg_line_length": 38.33759124087591,
"alnum_prop": 0.5189680613070589,
"repo_name": "bikash/opentsdb_spark",
"id": "4ae2235bf7b82619ed23bd58f4360aeadc8b9c14",
"size": "21009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentsdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22946"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import (Project, License, Team, Resource)
class TeamInline(admin.TabularInline):
    """Tabular inline for editing a project's team rows."""
    extra = 1
    model = Team
class ResourceInline(admin.TabularInline):
    """Tabular inline for editing a project's resource rows."""
    extra = 1
    model = Resource
class ProjectAdmin(admin.ModelAdmin):
    """Admin options for Project: slug auto-filled from the title, with
    team and resource rows edited inline."""
    list_display = ('title', 'description')
    prepopulated_fields = {'slug': ('title',)}
    inlines = [TeamInline, ResourceInline]
# Wire the models into the default admin site; License uses the stock ModelAdmin.
admin.site.register(Project, ProjectAdmin)
admin.site.register(License)
| {
"content_hash": "01af99843750e6a59ff10de5fd97b109",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 33.130434782608695,
"alnum_prop": 0.46062992125984253,
"repo_name": "nasa/39A",
"id": "f5f20bbb2a6863698163d1572ba59aa552ca416c",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spaceapps/projects/admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "124497"
},
{
"name": "HTML",
"bytes": "180316"
},
{
"name": "JavaScript",
"bytes": "359832"
},
{
"name": "Python",
"bytes": "128383"
},
{
"name": "Ruby",
"bytes": "1593"
}
],
"symlink_target": ""
} |
__author__ = 'Edward Hunter'
from nose.plugins.attrib import attr
import gevent
import os
from pyon.util.int_test import IonIntegrationTestCase
from pyon.public import BadRequest, NotFound, IonObject, RT, PRED, OT, CFG, StreamSubscriber, log
from ion.agent.control import AgentControl, StreamingAgentClient
from ion.agent.streaming_agent import StreamingAgent
from ion.data.persist.hdf5_dataset import DS_BASE_PATH, DS_FILE_PREFIX
from ion.data.schema.schema import DataSchemaParser
from interface.services.scion.iscion_management import ScionManagementClient
from interface.services.core.iidentity_management_service import IdentityManagementServiceClient
from interface.objects import Instrument, Dataset, GeospatialLocation, DataPacket
@attr('INT', group='scion')
class TestScionCDIPAgentData(IonIntegrationTestCase):
    """Test for Scion with agents streaming data from CDIP source

    NOTE(review): the streaming phase pulls from a live CDIP URL, so this
    integration test requires network access and a started container.
    """
    def setUp(self):
        # Boot the container with preload disabled so the test creates all
        # of its own resources (user, instrument, dataset).
        self._start_container()
        self.patch_alt_cfg('scion.process.preload.preloader.CFG',
                           {'scion': {'preload': {'enabled': False}}})
        self.container.start_rel_from_url('res/deploy/scion.yml')
        self.rr = self.container.resource_registry
        self.scion_client = ScionManagementClient()
        self.idm_client = IdentityManagementServiceClient()
        self.system_actor_id = None
        self.ui_server_proc = self.container.proc_manager.procs_by_name["ui_server"]
        self.scion_proc = self.container.proc_manager.procs_by_name["scion_management"]
        self.ui_base_url = self.ui_server_proc.base_url
        self.sg_base_url = self.ui_server_proc.gateway_base_url
    def tearDown(self):
        # Container teardown is handled by IonIntegrationTestCase/addCleanup.
        pass
    def test_scion_agent(self):
        # Create user
        actor_id = self.scion_client.define_user(
            first_name="John", last_name="Doe",
            username="jdoe@scion.com", password="s3cret", email="jdoe@scion.com")
        # Create instrument
        agent_info=[dict(agent_type="data_agent",
                         config=dict(plugin="scion.agent.model.cdip.cdip_plugin.CDIP_DataAgentPlugin",
                                     sampling_interval=10, stream_name="basic_streams",
                                     auto_streaming=False))]
        inst_obj = Instrument(name="TA_121A/MGENC/M40", description="CDIP buoy data",
                              location=GeospatialLocation(latitude=37.94831666666667, longitude=-123.4675),
                              agent_info=agent_info)
        inst_id, _ = self.rr.create(inst_obj, actor_id=actor_id)
        # Create dataset
        schema_def = DataSchemaParser.parse_schema_ref("ds_cdip01_main")
        ds_obj = Dataset(name="Dataset Sensor",
                         schema_definition=schema_def)
        ds_id, _ = self.rr.create(ds_obj, actor_id=actor_id)
        self.rr.create_association(inst_id, PRED.hasDataset, ds_id)
        # The HDF5 backing file must not exist until streaming begins.
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))
        self.assertFalse(os.path.exists(ds_filename))
        inst_data_t0 = self.scion_client.get_asset_data(inst_id)
        self.assertEquals(inst_data_t0["dataset_id"], ds_id)
        self.assertEquals(inst_data_t0["num_rows"], 0)
        # Install a data packet catcher
        self.recv_packets, self.recv_rows = [], 0
        def process_packet_cb(packet, route, stream):
            # Accumulate every packet so row counts can be checked later.
            if not isinstance(packet, DataPacket):
                log.warn("Received a non DataPacket message")
            self.recv_packets.append(packet)
            self.recv_rows += len(packet.data["data"])
            log.info("Received data packet #%s: rows=%s, cols=%s", len(self.recv_packets), len(packet.data["data"]),
                     packet.data["cols"])
            #log.info('Packet data: ' + str(packet.data))
        def cleanup_stream_sub():
            if self.stream_sub:
                self.stream_sub.stop()
                self.stream_sub = None
        self.stream_sub = StreamSubscriber(process=self.scion_proc, stream="basic_streams", callback=process_packet_cb)
        self.stream_sub.start()
        self.addCleanup(cleanup_stream_sub)
        # Start agent
        self.assertFalse(StreamingAgentClient.is_agent_active(inst_id))
        agent_pid = self.scion_client.start_agent(inst_id)
        self.assertTrue(StreamingAgentClient.is_agent_active(inst_id))
        sac = StreamingAgentClient(resource_id=inst_id, process=self.scion_proc)
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_INITIALIZED)
        sac.connect()
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_CONNECTED)
        # Coming in from the agent config.
        streaming_args = {
            'url' : 'http://cdip.ucsd.edu/data_access/justdar.cdip?029+pm',
            'sampling_interval' : 10
        }
        sac.start_streaming(streaming_args)
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_STREAMING)
        # Set to progressively high values for real data stream tests.
        gevent.sleep(20)
        # Retrieve data
        self.assertTrue(os.path.exists(ds_filename))
        inst_data = self.scion_client.get_asset_data(inst_id)
        """
        {'data': {'Dp': [[1465682100000, 325]],
          'Hs': [[1465682100000, 3.03]],
          'Ta': [[1465682100000, 6.92]],
          'Temp': [[1465682100000, 12.2]],
          'Tp': [[1465682100000, 9.09]]},
         'dataset_id': '08bc829159e6401182462b713b180dbe',
         'num_rows': 1,
         'ts_generated': '1465685467675',
         'var_def': [{'base_type': 'ntp_time',
                      'description': 'NTPv4 timestamp',
                      'name': 'time',
                      'storage_dtype': 'i8',
                      'unit': ''},
                     {'base_type': 'float',
                      'description': 'Significant wave height',
                      'name': 'Hs',
                      'storage_dtype': 'f8',
                      'unit': 'meters'},
                     {'base_type': 'float',
                      'description': 'Peak wave period',
                      'name': 'Tp',
                      'storage_dtype': 'f8',
                      'unit': 'seconds'},
                     {'base_type': 'int',
                      'description': 'Peak wave direction',
                      'name': 'Dp',
                      'storage_dtype': 'i4',
                      'unit': 'degrees'},
                     {'base_type': 'float',
                      'description': 'Average wave period',
                      'name': 'Ta',
                      'storage_dtype': 'f8',
                      'unit': 'seconds'},
                     {'base_type': 'float',
                      'description': 'Surface temperature',
                      'name': 'Temp',
                      'storage_dtype': 'f8',
                      'unit': 'celcius'}],
         'variables': ['time', 'Hs', 'Tp', 'Dp', 'Ta', 'Temp']}
        """
        num_rows = inst_data["num_rows"]
        log.info('CDIP test produced %i data rows.' % num_rows)
        # Take down agent
        sac.stop_streaming()  # Not required to stop agent, just to test here
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_CONNECTED)
        sac.disconnect()
        agent_status = sac.get_status()
        self.assertEquals(agent_status["current_state"], StreamingAgent.AGENTSTATE_INITIALIZED)
        self.scion_client.stop_agent(inst_id)
        self.assertFalse(StreamingAgentClient.is_agent_active(inst_id))
| {
"content_hash": "6d4f5f6cfe581a5084cda489be0d1bc0",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 119,
"avg_line_length": 41.07027027027027,
"alnum_prop": 0.5950250065806791,
"repo_name": "scion-network/scion",
"id": "7e38481de08c2f31d63efa1909e7a6cfcc90099e",
"size": "7621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scion/service/test/test_scion_cdipdata.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groff",
"bytes": "156117"
},
{
"name": "PLpgSQL",
"bytes": "10932"
},
{
"name": "Python",
"bytes": "104064"
},
{
"name": "Shell",
"bytes": "1773"
}
],
"symlink_target": ""
} |
import numpy as np
from copy import deepcopy
from .nddata_base import NDDataBase
from .nduncertainty import NDUncertainty, UnknownUncertainty
from .. import log
from ..units import Unit, Quantity
from ..utils.metadata import MetaData
__all__ = ['NDData']
_meta_doc = """`dict`-like : Additional meta information about the dataset."""
class NDData(NDDataBase):
    """
    A container for `numpy.ndarray`-based datasets, using the
    `~astropy.nddata.NDDataBase` interface.
    The key distinction from raw `numpy.ndarray` is the presence of
    additional metadata such as uncertainty, mask, unit, a coordinate system
    and/or a dictionary containing further meta information. This class *only*
    provides a container for *storing* such datasets. For further functionality
    take a look at the ``See also`` section.
    Parameters
    ----------
    data : `numpy.ndarray`-like or `NDData`-like
        The dataset.
    uncertainty : any type, optional
        Uncertainty in the dataset.
        Should have an attribute ``uncertainty_type`` that defines what kind of
        uncertainty is stored, for example ``"std"`` for standard deviation or
        ``"var"`` for variance. A metaclass defining such an interface is
        `NDUncertainty` - but isn't mandatory. If the uncertainty has no such
        attribute the uncertainty is stored as `UnknownUncertainty`.
        Defaults to ``None``.
    mask : any type, optional
        Mask for the dataset. Masks should follow the ``numpy`` convention that
        **valid** data points are marked by ``False`` and **invalid** ones with
        ``True``.
        Defaults to ``None``.
    wcs : any type, optional
        World coordinate system (WCS) for the dataset.
        Default is ``None``.
    meta : `dict`-like object, optional
        Additional meta information about the dataset. If no meta is provided
        an empty `collections.OrderedDict` is created.
        Default is ``None``.
    unit : `~astropy.units.Unit`-like or str, optional
        Unit for the dataset. Strings that can be converted to a
        `~astropy.units.Unit` are allowed.
        Default is ``None``.
    copy : `bool`, optional
        Indicates whether to save the arguments as copy. ``True`` copies
        every attribute before saving it while ``False`` tries to save every
        parameter as reference.
        Note however that it is not always possible to save the input as
        reference.
        Default is ``False``.
        .. versionadded:: 1.2
    Raises
    ------
    TypeError
        In case ``data`` or ``meta`` don't meet the restrictions.
    Notes
    -----
    Each attribute can be accessed through the homonymous instance attribute:
    ``data`` in a `NDData` object can be accessed through the `data`
    attribute::
        >>> from astropy.nddata import NDData
        >>> nd = NDData([1,2,3])
        >>> nd.data
        array([1, 2, 3])
    Given a conflicting implicit and an explicit parameter during
    initialization, for example the ``data`` is a `~astropy.units.Quantity` and
    the unit parameter is not ``None``, then the implicit parameter is replaced
    (without conversion) by the explicit one and a warning is issued::
        >>> import numpy as np
        >>> import astropy.units as u
        >>> q = np.array([1,2,3,4]) * u.m
        >>> nd2 = NDData(q, unit=u.cm)
        INFO: overwriting Quantity's current unit with specified unit. [astropy.nddata.nddata]
        >>> nd2.data  # doctest: +FLOAT_CMP
        array([1., 2., 3., 4.])
        >>> nd2.unit
        Unit("cm")
    See also
    --------
    NDDataRef
    NDDataArray
    """
    # Instead of a custom property use the MetaData descriptor also used for
    # Tables. It will check if the meta is dict-like or raise an exception.
    meta = MetaData(doc=_meta_doc, copy=False)
    def __init__(self, data, uncertainty=None, mask=None, wcs=None,
                 meta=None, unit=None, copy=False):
        # Rather pointless since the NDDataBase does not implement any setting
        # but before the NDDataBase did call the uncertainty
        # setter. But if anyone wants to alter this behaviour again the call
        # to the superclass NDDataBase should be in here.
        super().__init__()
        # Check if data is any type from which to collect some implicitly
        # passed parameters.
        if isinstance(data, NDData):  # don't use self.__class__ (issue #4137)
            # Of course we need to check the data because subclasses with other
            # init-logic might be passed in here. We could skip these
            # tests if we compared for self.__class__ but that has other
            # drawbacks.
            # Comparing if there is an explicit and an implicit unit parameter.
            # If that is the case use the explicit one and issue a warning
            # that there might be a conflict. In case there is no explicit
            # unit just overwrite the unit parameter with the NDData.unit
            # and proceed as if that one was given as parameter. Same for the
            # other parameters.
            if (unit is not None and data.unit is not None and
                    unit != data.unit):
                log.info("overwriting NDData's current "
                         "unit with specified unit.")
            elif data.unit is not None:
                unit = data.unit
            if uncertainty is not None and data.uncertainty is not None:
                log.info("overwriting NDData's current "
                         "uncertainty with specified uncertainty.")
            elif data.uncertainty is not None:
                uncertainty = data.uncertainty
            if mask is not None and data.mask is not None:
                log.info("overwriting NDData's current "
                         "mask with specified mask.")
            elif data.mask is not None:
                mask = data.mask
            if wcs is not None and data.wcs is not None:
                log.info("overwriting NDData's current "
                         "wcs with specified wcs.")
            elif data.wcs is not None:
                wcs = data.wcs
            if meta is not None and data.meta is not None:
                log.info("overwriting NDData's current "
                         "meta with specified meta.")
            elif data.meta is not None:
                meta = data.meta
            # Strip the implicit parameters off: only the raw array remains.
            data = data.data
        else:
            if hasattr(data, 'mask') and hasattr(data, 'data'):
                # Separating data and mask
                if mask is not None:
                    log.info("overwriting Masked Objects's current "
                             "mask with specified mask.")
                else:
                    mask = data.mask
                # Just save the data for further processing, we could be given
                # a masked Quantity or something else entirely. Better to check
                # it first.
                data = data.data
            if isinstance(data, Quantity):
                if unit is not None and unit != data.unit:
                    log.info("overwriting Quantity's current "
                             "unit with specified unit.")
                else:
                    unit = data.unit
                data = data.value
        # Quick check on the parameters if they match the requirements.
        if (not hasattr(data, 'shape') or not hasattr(data, '__getitem__') or
                not hasattr(data, '__array__')):
            # Data doesn't look like a numpy array, try converting it to
            # one.
            # NOTE(review): on NumPy 1.x copy=False means "avoid a copy if
            # possible"; NumPy 2.0 changed it to raise when a copy is needed
            # -- confirm the pinned NumPy version if upgrading.
            data = np.array(data, subok=True, copy=False)
            # Another quick check to see if what we got looks like an array
            # rather than an object (since numpy will convert a
            # non-numerical/non-string inputs to an array of objects).
            if data.dtype == 'O':
                raise TypeError("could not convert data to numpy array.")
        if unit is not None:
            unit = Unit(unit)
        if copy:
            # Data might have been copied before but no way of validating
            # without another variable.
            data = deepcopy(data)
            mask = deepcopy(mask)
            wcs = deepcopy(wcs)
            meta = deepcopy(meta)
            uncertainty = deepcopy(uncertainty)
            # Actually - copying the unit is unnecessary but better safe
            # than sorry :-)
            unit = deepcopy(unit)
        # Store the attributes
        self._data = data
        self.mask = mask
        self._wcs = wcs
        self.meta = meta  # TODO: Make this call the setter sometime
        self._unit = unit
        # Call the setter for uncertainty to further check the uncertainty
        self.uncertainty = uncertainty
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        prefix = self.__class__.__name__ + '('
        body = np.array2string(self.data, separator=', ', prefix=prefix)
        return ''.join([prefix, body, ')'])
    @property
    def data(self):
        """
        `~numpy.ndarray`-like : The stored dataset.
        """
        return self._data
    @property
    def mask(self):
        """
        any type : Mask for the dataset, if any.
        Masks should follow the ``numpy`` convention that valid data points are
        marked by ``False`` and invalid ones with ``True``.
        """
        return self._mask
    @mask.setter
    def mask(self, value):
        self._mask = value
    @property
    def unit(self):
        """
        `~astropy.units.Unit` : Unit for the dataset, if any.
        """
        return self._unit
    @property
    def wcs(self):
        """
        any type : A world coordinate system (WCS) for the dataset, if any.
        """
        return self._wcs
    @property
    def uncertainty(self):
        """
        any type : Uncertainty in the dataset, if any.
        Should have an attribute ``uncertainty_type`` that defines what kind of
        uncertainty is stored, such as ``'std'`` for standard deviation or
        ``'var'`` for variance. A metaclass defining such an interface is
        `~astropy.nddata.NDUncertainty` but isn't mandatory.
        """
        return self._uncertainty
    @uncertainty.setter
    def uncertainty(self, value):
        if value is not None:
            # There is one requirements on the uncertainty: That
            # it has an attribute 'uncertainty_type'.
            # If it does not match this requirement convert it to an unknown
            # uncertainty.
            if not hasattr(value, 'uncertainty_type'):
                log.info('uncertainty should have attribute uncertainty_type.')
                value = UnknownUncertainty(value, copy=False)
            # If it is a subclass of NDUncertainty we must set the
            # parent_nddata attribute. (#4152)
            if isinstance(value, NDUncertainty):
                # In case the uncertainty already has a parent create a new
                # instance because we need to assume that we don't want to
                # steal the uncertainty from another NDData object
                if value._parent_nddata is not None:
                    value = value.__class__(value, copy=False)
                # Then link it to this NDData instance (internally this needs
                # to be saved as weakref but that's done by NDUncertainty
                # setter).
                value.parent_nddata = self
        self._uncertainty = value
| {
"content_hash": "29bc62d90dfa3027051522d989e76de5",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 94,
"avg_line_length": 38.0625,
"alnum_prop": 0.586639011321407,
"repo_name": "DougBurke/astropy",
"id": "5759ecc62abd3be87262bf3ed3ba6fb1ee472e6a",
"size": "11685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/nddata/nddata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "367279"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "8390850"
},
{
"name": "TeX",
"bytes": "805"
}
],
"symlink_target": ""
} |
import rospy
from monitored_navigation.monitor_state import MonitorState
from std_msgs.msg import Bool
from sensor_msgs.msg import Joy
class MonitorPause(MonitorState):
    """Monitor state watching the pause/resume request topic.

    When constructed with ``is_paused=False`` it triggers on a pause
    request; with ``is_paused=True`` it triggers on a resume request.
    """
    def __init__(self, is_paused=False):
        # If already paused we watch for resume, otherwise for pause.
        self.is_paused = is_paused
        self.pub = rospy.Publisher("monitored_navigation/pause_requested", Bool, queue_size=1)
        # Joypad-driven pause flag: button 4 held down means "paused".
        self.pad_paused = False
        rospy.Subscriber("/teleop_joystick/joy", Joy, self.joy_cb)
        MonitorState.__init__(self, "/monitored_navigation/pause_requested", Bool, self.monitor_cb)
    def monitor_cb(self, ud, msg):
        # Trigger on a resume message (data False) while paused, and on a
        # pause message (data True) otherwise.
        return msg.data if self.is_paused else not msg.data
    def joy_cb(self, msg):
        # Mirror button 4 into pad_paused and republish it as a request.
        self.pad_paused = msg.buttons[4] != 0
        self.pub.publish(self.pad_paused)
| {
"content_hash": "8329221a739065ddffb9e7a47732a6b7",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 100,
"avg_line_length": 24.682926829268293,
"alnum_prop": 0.6017786561264822,
"repo_name": "strands-project/strands_recovery_behaviours",
"id": "bd7a04a57c99b1cd1f761e1df5579f776f132042",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/hydro-devel",
"path": "strands_monitored_nav_states/src/strands_monitored_nav_states/monitor_pause.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8990"
},
{
"name": "CMake",
"bytes": "19939"
},
{
"name": "Python",
"bytes": "65303"
}
],
"symlink_target": ""
} |
"""This is mostly an example of how to organize a class for the WaveSculptor
motor controller. None of this code is tested at all and will likely require
some good work."""
import logging
import struct
def parseWSPacket(rx):
    """Decode a WaveSculptor CAN frame payload into two floats.

    Each broadcast packet carries two IEEE-754 single-precision floats in
    its 8 data bytes: bytes 0-3 hold the "low" value, bytes 4-7 the "high"
    value.

    rx -- received CAN frame; ``rx.data`` is an 8-byte sequence.
    Returns ``[lowerFloat, upperFloat]`` as plain Python floats.
    """
    # Repack each 4-byte half into a buffer, then decode it as a float.
    # NOTE(review): the byte order is reversed before a native-endian unpack;
    # confirm against the WaveSculptor CAN spec (wire format is little-endian).
    lowerArray = struct.pack('BBBB', rx.data[3], rx.data[2], rx.data[1], rx.data[0])
    upperArray = struct.pack('BBBB', rx.data[7], rx.data[6], rx.data[5], rx.data[4])
    # BUG FIX: struct.unpack returns a 1-tuple; take element 0 so callers
    # receive floats rather than tuples.
    lowerFloat = struct.unpack('f', lowerArray)[0]
    upperFloat = struct.unpack('f', upperArray)[0]
    return [lowerFloat, upperFloat]
class wavesculptor22():
    """Driver object for a Tritium WaveSculptor 22 motor controller on CAN."""
    # initialize the object
    def __init__(self, can, baseaddr):
        """can -- CAN interface providing WaitForPacket(id, timeout);
        baseaddr -- CAN base address; broadcasts arrive at baseaddr + offset."""
        # handle to the can interface
        self.can = can
        # baseaddress of ws22
        self.baseaddr = baseaddr
        # BUG FIX: the original string was 'Wavesculptor 0x03X' with no '%'
        # conversion specifier, which raised TypeError at construction.
        logging.info('Initializing Wavesculptor 0x%03X' % baseaddr)
    def getStateData(self):
        """Read one round of broadcast telemetry and cache it on self
        (busVoltage/busCurrent, motorVelocity/vehicleVelocity, iC/iB, vD/vQ)."""
        # set the timeout to slightly longer then interval
        to = 0.25
        # grab some packets from the bus
        StatusPkt = self.can.WaitForPacket(self.baseaddr + 1, to)
        BusMeasPkt = self.can.WaitForPacket(self.baseaddr + 2, to)
        VelocityPkt = self.can.WaitForPacket(self.baseaddr + 3, to)
        PhaseCurrentPkt = self.can.WaitForPacket(self.baseaddr + 4, to)
        # BUG FIX: the motor-voltage vector is broadcast at offset +5; the
        # original re-read offset +4 (phase current) here. Confirm against the
        # WaveSculptor 22 CAN communications spec for your firmware revision.
        MotorVoltagePkt = self.can.WaitForPacket(self.baseaddr + 5, to)
        # parse them
        [self.busVoltage, self.busCurrent] = parseWSPacket(BusMeasPkt)
        [self.motorVelocity, self.vehicleVelocity] = parseWSPacket(VelocityPkt)
        [self.iC, self.iB] = parseWSPacket(PhaseCurrentPkt)
        [self.vD, self.vQ] = parseWSPacket(MotorVoltagePkt)
| {
"content_hash": "492637b49c2dd416e311988bb7c6697c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 33.588235294117645,
"alnum_prop": 0.6701692936368944,
"repo_name": "dilithiumpower/mppt_config",
"id": "70a958550be8d444946d7343d250c9265c15fd68",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/ws22.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53158"
}
],
"symlink_target": ""
} |
"""Tests for tfx.dsl.input_resolution.ops.exclude_spans_op."""
import tensorflow as tf
from tfx import types
from tfx.dsl.input_resolution.ops import ops
from tfx.dsl.input_resolution.ops import test_utils
class ArtifactWithoutSpan(types.Artifact):
  """An Artifact without "span" as a PROPERTY.

  Used to check that ExcludeSpans drops artifacts that do not declare a
  ``span`` property (see testExcludeSpans below).
  """
  # Registered type name only; no PROPERTIES dict, so "span" is absent.
  TYPE_NAME = 'ArtifactWithoutSpan'
class ExcludeSpansOpTest(tf.test.TestCase):
  """Unit tests for the ExcludeSpans resolver op."""
  def _exclude(self, artifacts, **kwargs):
    # Thin wrapper so each case reads as input -> denylist -> expectation.
    return test_utils.run_resolver_op(ops.ExcludeSpans, artifacts, **kwargs)
  def testExcludeSpans_Empty(self):
    self.assertEqual(self._exclude([]), [])
  def testExcludeSpans_SingleEntry(self):
    only = test_utils.DummyArtifact()
    only.span = 1
    self.assertEqual(self._exclude([only]), [only])
    self.assertEqual(self._exclude([only], denylist=[1]), [])
    self.assertEqual(self._exclude([only], denylist=[2]), [only])
  def testExcludeSpans(self):
    span_one = test_utils.DummyArtifact()
    span_two_a = test_utils.DummyArtifact()
    span_two_b = test_utils.DummyArtifact()
    no_span = ArtifactWithoutSpan()
    span_one.span = 1
    span_two_a.span = 2
    span_two_b.span = 2
    artifacts = [span_one, span_two_a, span_two_b, no_span]
    # Even with an empty denylist the span-less artifact is dropped.
    self.assertEqual(
        self._exclude(artifacts, denylist=[]),
        [span_one, span_two_a, span_two_b])
    self.assertEqual(
        self._exclude(artifacts, denylist=[1]), [span_two_a, span_two_b])
    self.assertEqual(self._exclude(artifacts, denylist=[2]), [span_one])
    self.assertEqual(self._exclude(artifacts, denylist=[1, 2]), [])
# Allow running this test module directly (python exclude_spans_op_test.py).
if __name__ == '__main__':
  tf.test.main()
| {
"content_hash": "060d3c1f4abe7275fc519ae3afbbe3cd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 68,
"avg_line_length": 27.08955223880597,
"alnum_prop": 0.6721763085399449,
"repo_name": "tensorflow/tfx",
"id": "1d7d3ebd1e7702b99ba127750f599a587a92a307",
"size": "2411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/dsl/input_resolution/ops/exclude_spans_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Exception raised for errors while driving the oc/oadm command line tools.'''
    pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Thin Python 2 wrapper around /usr/bin/oc and /usr/bin/oadm: builds the
    argument list, shells out via subprocess and returns a result dict of
    {'returncode', 'results', 'cmd', ...}.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI

        namespace      -- namespace passed as -n to most commands
        kubeconfig     -- exported as KUBECONFIG for the child process
        verbose        -- when True, echo commands and their output
        all_namespaces -- when True, _get uses --all-namespaces
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies the key/value pairs from `content`
        via Yedit and runs `oc replace` only when something actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put returns (changed, document); only replace if any key changed.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''Run `oc replace -f <fname>` in the current namespace.'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''Dump `content` to a temp file and `oc create` it.'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''Run `oc create -f <fname>` in the current namespace.'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''Run `oc delete` for the named resource, optionally with a selector.'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''Run `oc process` on a template (by name or from stdin data) and
        optionally `oc create` the processed result.'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # read the template from stdin instead of by name
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''Return a resource (optionally by name and/or selector) as parsed
        JSON, normalized so rval['results'] is always a list.'''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        # NOTE(review): dict.has_key is Python 2 only; use `in` if porting.
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        '''Run `oadm manage-node --schedulable` against node(s) or a selector.'''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        '''Run `oadm manage-node --list-pods` for node(s) or a selector.'''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        '''Run `oadm manage-node --evacuate` for node(s) or a selector.'''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        '''Run `oc import-image`, optionally tagged and from a registry url.'''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    # pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Executes `oc` (or `oadm` when oadm=True) with KUBECONFIG exported,
        optionally feeding `input_data` on stdin, and returns a dict with
        returncode, results (parsed JSON or raw stdout), cmd, and on
        failure/error also stderr/stdout.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # NOTE(review): err.message is Python 2 only.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules

    Static helpers for temp-file management and for comparing a user-supplied
    resource definition against what the cluster returned. Written for
    Python 2 (print statements, dict.has_key).
    '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        `data` is serialized as yaml (default), json, or written verbatim.
        The file is registered for deletion at interpreter exit.
        '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array

        Each item must have 'path' and 'data' keys; returns a list of
        {'name': basename, 'path': tmp path} dicts.
        '''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each listed file or directory tree.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name

        Returns the first result whose metadata.name matches, else None.
        '''
        rval = None
        for result in results:
            # NOTE(review): dict.has_key is Python 2 only.
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file, parsed as yaml or json '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares dicts/lists, ignoring `skip` keys (metadata and
        status are always skipped). Returns True when everything in
        result_def matches user_def, printing diagnostics when debug=True.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # Element-wise comparison; dict elements are compared
                # recursively, everything else by simple equality.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config: holds a resource name/namespace/kubeconfig plus a dict
    of CLI option descriptors ({name: {'include': bool, 'value': ...}}).'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        '''Read-only access to the option descriptor dict.'''
        return self._options
    def to_option_list(self):
        '''Alias for stringify(): the options rendered as CLI parameters.'''
        return self.stringify()
    def stringify(self):
        '''Render the option hash as a list of --key=value CLI parameters.

        An option is emitted when it is marked include and its value is
        truthy or an int (so an explicit 0 is still passed through).
        '''
        return ['--%s=%s' % (name.replace('_', '-'), spec['value'])
                for name, spec in self.config_options.items()
                if spec['include']
                and (spec['value'] or isinstance(spec['value'], int))]
class YeditException(Exception):
    ''' Exception class for Yedit: raised on write/load/path errors. '''
    pass
class Yedit(object):
    ''' Class to modify yaml files

    Addresses values inside a parsed document with key paths such as
    "a.b[0].c" (the separator is configurable).
    '''
    # Key-path grammar: segments are [index] or bare identifiers. %s is
    # filled in with the non-active separator characters (com_sep minus the
    # separator in use) so they remain legal inside segment names.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    # All separator characters Yedit understands.
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
    @property
    def separator(self):
        '''Getter for the key-path separator character (default ".").'''
        return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
    @property
    def yaml_dict(self):
        '''Getter for the parsed document (dict/list) held by this editor.'''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        '''Setter for the parsed document.'''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key

        Empty key clears the whole dict/list. Returns True on removal,
        None when the path is invalid or nothing matched.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent of the final path segment.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        '''Return whether ``value`` exists at ``path`` in the document.'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if isinstance(entry, list):
            # List entry: plain containment test.
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # dict vs dict: every key/value pair in `value` must match
                # the corresponding pair in `entry`.
                # NOTE(review): a key present in `value` but missing from
                # `entry` raises KeyError here rather than returning False,
                # and an empty `value` dict matches vacuously (for/else sets
                # rval True) -- confirm callers tolerate both behaviors.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            # Non-dict value against a dict entry: membership checks keys.
            return value in entry
        # Scalar entry: direct equality.
        return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        '''Update an existing entry at ``path``.

        dict entry: merge ``value`` (which must itself be a dict) into it.
        list entry: replace the element selected by ``curr_value`` (matched
        by value) or ``index``; when neither selects a differing element,
        append ``value`` if it is not already present.

        Returns:
            tuple: (changed, yaml_dict)

        Raises:
            YeditException: when merging a non-dict into a dict entry.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError as _:
            entry = None
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s] [%s]' % (value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                # Locate the element to replace by its current value.
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index != None:
                ind = index
            if ind != None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # No (differing) target was selected -- see if it exists in the list.
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return unchanged
            if ind != None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
    '''Yedit wrapper that understands the OpenShift DeploymentConfig layout
    (env vars, volumes, volume mounts and replicas of the first container).
    '''

    default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: default_dc
  namespace: default
spec:
  replicas: 0
  selector:
    default_dc: default_dc
  strategy:
    resources: {}
    rollingParams:
      intervalSeconds: 1
      maxSurge: 0
      maxUnavailable: 25%
      timeoutSeconds: 600
      updatePercent: -25
      updatePeriodSeconds: 1
    type: Rolling
  template:
    metadata:
    spec:
      containers:
      - env:
        - name: default
          value: default
        image: default
        imagePullPolicy: IfNotPresent
        name: default_dc
        ports:
        - containerPort: 8000
          hostPort: 8000
          protocol: TCP
          name: default_port
        resources: {}
        terminationMessagePath: /dev/termination-log
      dnsPolicy: ClusterFirst
      hostNetwork: true
      nodeSelector:
        type: compute
      restartPolicy: Always
      securityContext: {}
      serviceAccount: default
      serviceAccountName: default
      terminationGracePeriodSeconds: 30
  triggers:
  - type: ConfigChange
'''

    # Dotted paths into the DeploymentConfig document.
    replicas_path = "spec.replicas"
    env_path = "spec.template.spec.containers[0].env"
    volumes_path = "spec.template.spec.volumes"
    container_path = "spec.template.spec.containers"
    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"

    def __init__(self, content=None):
        ''' Constructor for OpenshiftOC '''
        if not content:
            content = DeploymentConfig.default_deployment_config
        super(DeploymentConfig, self).__init__(content=content)

    # pylint: disable=no-member
    def add_env_value(self, key, value):
        '''Add a {name, value} pair to the env array.

        Returns True on success.
        '''
        rval = False
        env = self.get_env_vars()
        if env:
            env.append({'name': key, 'value': value})
            rval = True
        else:
            # No env list yet -- create it with the single entry.
            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
            rval = result[0]
        return rval

    def exists_env_value(self, key, value):
        '''Return whether an env var with this exact name AND value exists.'''
        results = self.get_env_vars()
        if not results:
            return False
        for result in results:
            if result['name'] == key and result['value'] == value:
                return True
        return False

    def exists_env_key(self, key):
        '''Return whether an env var with this name exists (any value).'''
        results = self.get_env_vars()
        if not results:
            return False
        for result in results:
            if result['name'] == key:
                return True
        return False

    def get_env_vars(self):
        '''Return the env var list of the first container (or []).'''
        return self.get(DeploymentConfig.env_path) or []

    def delete_env_var(self, keys):
        '''Delete every env var named in ``keys`` (a str or list of str).

        Returns True when at least one variable was removed.
        '''
        if not isinstance(keys, list):
            keys = [keys]
        env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            # Reset per key: without this, a key that is not found would
            # reuse the previous key's index and delete the wrong entry.
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            # `is not None`, not truthiness: index 0 is a valid match.
            if idx is not None:
                modified = True
                del env_vars_array[idx]
        return modified

    def update_env_var(self, key, value):
        '''Set env var ``key`` to ``value``, adding it when absent.

        Returns True.
        '''
        env_vars_array = self.get_env_vars()
        idx = None
        for env_idx, env_var in enumerate(env_vars_array):
            if env_var['name'] == key:
                idx = env_idx
                break
        # `is not None`: a match at index 0 must be updated, not re-added.
        if idx is not None:
            env_vars_array[idx]['value'] = value
        else:
            self.add_env_value(key, value)
        return True

    def exists_volume_mount(self, volume_mount):
        '''Return whether a volume mount with the same name exists.'''
        exist_volume_mounts = self.get_volume_mounts()
        if not exist_volume_mounts:
            return False
        volume_mount_found = False
        for exist_volume_mount in exist_volume_mounts:
            if exist_volume_mount['name'] == volume_mount['name']:
                volume_mount_found = True
                break
        return volume_mount_found

    def exists_volume(self, volume):
        '''Return whether a volume with the same name exists.'''
        exist_volumes = self.get_volumes()
        volume_found = False
        for exist_volume in exist_volumes:
            if exist_volume['name'] == volume['name']:
                volume_found = True
                break
        return volume_found

    def find_volume_by_name(self, volume, mounts=False):
        '''Return the stored volume (or volume mount when ``mounts`` is
        True) matching ``volume['name']``, or None.'''
        if mounts:
            volumes = self.get_volume_mounts()
        else:
            volumes = self.get_volumes()
        for exist_volume in volumes:
            if exist_volume['name'] == volume['name']:
                return exist_volume
        return None

    def get_replicas(self):
        '''Return the spec.replicas setting.'''
        return self.get(DeploymentConfig.replicas_path)

    def get_volume_mounts(self):
        '''Return the first container's volume mounts (or []).'''
        return self.get_volumes(mounts=True)

    def get_volumes(self, mounts=False):
        '''Return the pod volumes, or the first container's volume mounts
        when ``mounts`` is True (always a list).'''
        if mounts:
            return self.get(DeploymentConfig.volume_mounts_path) or []
        return self.get(DeploymentConfig.volumes_path) or []

    def delete_volume_by_name(self, volume):
        '''Delete the volume matching ``volume['name']`` and its mount.

        Returns True when anything was removed.
        '''
        modified = False
        exist_volume_mounts = self.get_volume_mounts()
        exist_volumes = self.get_volumes()
        del_idx = None
        for idx, exist_volume in enumerate(exist_volumes):
            # `in` instead of py2-only dict.has_key().
            if 'name' in exist_volume and exist_volume['name'] == volume['name']:
                del_idx = idx
                break
        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True

        del_idx = None
        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
                del_idx = idx
                break
        if del_idx is not None:
            # Delete by the matched index (del_idx), not the loop variable.
            del exist_volume_mounts[del_idx]
            modified = True
        return modified

    def add_volume_mount(self, volume_mount):
        '''Add ``volume_mount`` to the first container, creating the
        volumeMounts list when absent.'''
        exist_volume_mounts = self.get_volume_mounts()
        if not exist_volume_mounts and volume_mount:
            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
        else:
            exist_volume_mounts.append(volume_mount)

    def add_volume(self, volume):
        '''Add ``volume`` to the pod spec, creating the volumes list when
        absent. A falsy volume is a no-op.'''
        exist_volumes = self.get_volumes()
        if not volume:
            return
        if not exist_volumes:
            self.put(DeploymentConfig.volumes_path, [volume])
        else:
            exist_volumes.append(volume)

    def update_replicas(self, replicas):
        '''Set spec.replicas to ``replicas``.'''
        self.put(DeploymentConfig.replicas_path, replicas)

    def update_volume(self, volume):
        '''Replace the volume matching ``volume['name']`` (add when absent).

        Returns True, or False when ``volume`` is falsy.
        '''
        exist_volumes = self.get_volumes()
        if not volume:
            return False
        update_idx = None
        for idx, exist_vol in enumerate(exist_volumes):
            if exist_vol['name'] == volume['name']:
                update_idx = idx
                break
        if update_idx is not None:
            exist_volumes[update_idx] = volume
        else:
            self.add_volume(volume)
        return True

    def update_volume_mount(self, volume_mount):
        '''Update (or add) ``volume_mount`` on the first container.

        Returns True when the document was modified.
        '''
        modified = False
        exist_volume_mounts = self.get_volume_mounts()
        if not volume_mount:
            return False
        # Track "found" separately from "modified": an existing mount whose
        # mountPath already matches must NOT be re-added as a duplicate.
        found = False
        for exist_vol_mount in exist_volume_mounts:
            if exist_vol_mount['name'] == volume_mount['name']:
                found = True
                if 'mountPath' in exist_vol_mount and \
                        str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
                    modified = True
                break
        if not found:
            self.add_volume_mount(volume_mount)
            modified = True
        return modified

    def needs_update_volume(self, volume, volume_mount):
        '''Return True when the stored volume/mount differ from the given
        ones (i.e. an update is needed).

        NOTE(review): when the volume or mount does not exist yet,
        find_volume_by_name returns None and the comparisons below raise --
        callers appear to check existence first; confirm.
        '''
        exist_volume = self.find_volume_by_name(volume)
        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
        results = []
        results.append(exist_volume['name'] == volume['name'])
        if 'secret' in volume:
            results.append('secret' in exist_volume)
            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
            results.append(exist_volume_mount['name'] == volume_mount['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'emptyDir' in volume:
            results.append(exist_volume_mount['name'] == volume['name'])
            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
        elif 'persistentVolumeClaim' in volume:
            pvc = 'persistentVolumeClaim'
            results.append(pvc in exist_volume)
            if results[-1]:
                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
                if 'claimSize' in volume[pvc]:
                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostpath' in volume:
            results.append('hostPath' in exist_volume)
            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
        return not all(results)

    def needs_update_replicas(self, replicas):
        '''Return True when spec.replicas differs from ``replicas``.'''
        current_reps = self.get(DeploymentConfig.replicas_path)
        return current_reps != replicas
# pylint: disable=too-many-instance-attributes
class OCEnv(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''

    # Dotted path to the first container's env list, per resource kind.
    container_path = {"pod": "spec.containers[0].env",
                      "dc": "spec.template.spec.containers[0].env",
                      "rc": "spec.template.spec.containers[0].env",
                     }

    # pylint allows 5. we need 6
    # pylint: disable=too-many-arguments
    def __init__(self,
                 namespace,
                 kind,
                 env_vars,
                 resource_name=None,
                 list_all=False,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(OCEnv, self).__init__(namespace, kubeconfig)
        self.kind = kind                # resource kind: 'pod', 'dc' or 'rc'
        self.name = resource_name       # name of the resource to operate on
        self.namespace = namespace
        self.list_all = list_all
        self.env_vars = env_vars        # dict of env vars to manage
        self.kubeconfig = kubeconfig
        self.verbose = verbose
        self._resource = None           # lazy cache behind the `resource` property

    @property
    def resource(self):
        '''Lazily fetched wrapper around the remote resource.

        NOTE(review): get() only assigns self._resource for kind == 'dc';
        for 'pod'/'rc' the cache stays None, so this property re-enters
        get() on every access (get() itself touches self.resource, which
        would recurse) -- confirm whether only 'dc' is supported here.
        '''
        if not self._resource:
            self.get()
        return self._resource

    @resource.setter
    def resource(self, data):
        ''' setter function for resource var'''
        self._resource = data

    def value_exists(self, key, value):
        ''' return whether a key, value pair exists '''
        return self.resource.exists_env_value(key, value)

    def key_exists(self, key):
        ''' return whether a key exists '''
        return self.resource.exists_env_key(key)

    def get(self):
        '''Fetch the resource; on success cache the DeploymentConfig wrapper
        and replace result['results'] with the env var list.'''
        result = self._get(self.kind, self.name)
        if result['returncode'] == 0:
            if self.kind == 'dc':
                # Wrap the raw document so the env helpers are available.
                self.resource = DeploymentConfig(content=result['results'][0])
            result['results'] = self.resource.get(OCEnv.container_path[self.kind]) or []
        return result

    def delete(self):
        '''Remove the managed env vars; returns the replace result, or a
        no-change dict when nothing was deleted.'''
        #yed.put(OCEnv.container_path[self.kind], env_vars_array)
        if self.resource.delete_env_var(self.env_vars.keys()):
            return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
        return {'returncode': 0, 'changed': False}

    # pylint: disable=too-many-function-args
    def put(self):
        '''Apply every managed env var to the resource and replace it.'''
        for update_key, update_value in self.env_vars.items():
            self.resource.update_env_var(update_key, update_value)
        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
def main():
    '''
    ansible oc module for managing environment variables on
    pods / replication controllers / deployment configs
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            kind=dict(default='rc', choices=['dc', 'rc', 'pods'], type='str'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, required=True, type='str'),
            env_vars=dict(default=None, type='dict'),
            list_all=dict(default=False, type='bool'),
        ),
        # NOTE(review): neither 'content' nor 'files' is declared above, so
        # this exclusion can never trigger -- looks copied from another
        # module; confirm before removing.
        mutually_exclusive=[["content", "files"]],
        supports_check_mode=True,
    )
    ocenv = OCEnv(module.params['namespace'],
                  module.params['kind'],
                  module.params['env_vars'],
                  resource_name=module.params['name'],
                  list_all=module.params['list_all'],
                  kubeconfig=module.params['kubeconfig'],
                  verbose=module.params['debug'])

    state = module.params['state']

    api_rval = ocenv.get()

    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")

    ########
    # Delete
    ########
    if state == 'absent':
        # Delete only when at least one managed key actually exists.
        for key in module.params.get('env_vars', {}).keys():
            if ocenv.resource.exists_env_key(key):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a delete.')
                api_rval = ocenv.delete()
                module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")

    if state == 'present':
        ########
        # Create
        ########
        # Update/create only when some key/value pair is missing or stale.
        for key, value in module.params.get('env_vars', {}).items():
            if not ocenv.value_exists(key, value):
                if module.check_mode:
                    module.exit_json(changed=False, msg='Would have performed a create.')

                # Create it here
                api_rval = ocenv.put()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)

                # return the created object
                api_rval = ocenv.get()
                if api_rval['returncode'] != 0:
                    module.fail_json(msg=api_rval)

                module.exit_json(changed=True, results=api_rval, state="present")
        module.exit_json(changed=False, results=api_rval, state="present")

    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required by Ansible.
from ansible.module_utils.basic import *
# Invoked unconditionally (no __main__ guard): Ansible executes this file
# as a standalone module, so the direct call is the expected entry point.
main()
| {
"content_hash": "095848f5aeb708337a8b751fbfc15847",
"timestamp": "",
"source": "github",
"line_count": 1394,
"max_line_length": 118,
"avg_line_length": 32.82209469153515,
"alnum_prop": 0.5348821960921449,
"repo_name": "appuio/ansible-role-openshift-zabbix-monitoring",
"id": "6d2f1e596f7894deee39451129f94ba54bc5f448",
"size": "46165",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/openshift-tools/ansible/roles/lib_openshift_3.2/library/oc_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3095"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
from c7n.utils import local_session, chunks, type_schema
from .core import Filter
class HealthEventFilter(Filter):
    """Check if there are health events related to the resources

    Health events are stored as annotation on a resource.
    """

    schema = type_schema(
        'health-event',
        types={'type': 'array', 'items': {'type': 'string'}},
        statuses={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['open', 'upcoming', 'closed']
        }})
    permissions = ('health:DescribeEvents', 'health:DescribeAffectedEntities',
                   'health:DescribeEventDetails')

    def process(self, resources, event=None):
        """Annotate and return the subset of resources with health events."""
        if not resources:
            return resources
        # The Health API is only served out of us-east-1.
        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        params = self.get_filter_parameters()
        resource_map = {r[self.manager.get_model().id]: r for r in resources}
        matched = set()
        processed_arns = set()
        # DescribeEvents accepts at most 100 entity values per call.
        for id_chunk in chunks(resource_map.keys(), 100):
            params['entityValues'] = id_chunk
            new_events = [
                e for e in client.describe_events(filter=params)['events']
                if e['arn'] not in processed_arns
            ]
            arn_to_event = {e['arn']: e for e in new_events}
            for entity in self.process_event(new_events):
                rid = entity['entityValue']
                if rid not in resource_map:
                    continue
                resource_map[rid].setdefault(
                    'c7n:HealthEvent', []).append(arn_to_event[entity['eventArn']])
                matched.add(rid)
            processed_arns.update(arn_to_event.keys())
        return [resource_map[rid] for rid in matched]

    def get_filter_parameters(self):
        """Build the DescribeEvents filter from policy data."""
        mgr = self.manager
        # EBS registers under the 'ebs' resource type but the Health API
        # files it under the EBS service code.
        if mgr.data['resource'] == 'ebs':
            service = 'EBS'
        else:
            service = mgr.get_model().service.upper()
        params = {'services': [service],
                  'regions': [self.manager.config.region],
                  'eventStatusCodes': self.data.get(
                      'statuses', ['open', 'upcoming'])}
        if self.data.get('types'):
            params['eventTypeCodes'] = self.data.get('types')
        return params

    def process_event(self, health_events):
        """Attach descriptions to the events and return affected entities."""
        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        entities = []
        # DescribeEventDetails takes at most 10 event ARNs per call.
        for batch in chunks(health_events, 10):
            arn_map = {e['arn']: e for e in batch}
            arns = list(arn_map.keys())
            details = client.describe_event_details(
                eventArns=arns).get('successfulSet', ())
            for detail in details:
                arn_map[detail['event']['arn']]['Description'] = detail[
                    'eventDescription']['latestDescription']
            paginator = client.get_paginator('describe_affected_entities')
            for page in paginator.paginate(filter={'eventArns': arns}):
                entities.extend(page['entities'])
        return entities
| {
"content_hash": "fc674444bd1d9c3adaf3d09515bdc280",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 82,
"avg_line_length": 38.09411764705882,
"alnum_prop": 0.5558987029030266,
"repo_name": "siddartha1992/cloud-custodian",
"id": "12e2e1510bf9e4718e3ba6c36a5996a84da4adcf",
"size": "3823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/filters/health.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "1546704"
}
],
"symlink_target": ""
} |
import base64
import datetime
import os
import tornado
import tornado.web
import tornado.gen
import tornado.httpclient
from tornado.auth import GoogleOAuth2Mixin
from oauth2client import GOOGLE_REVOKE_URI, GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials, _extract_id_token
from handler_base import HandlerBase
class AuthHandler(HandlerBase, GoogleOAuth2Mixin):
    '''Tornado handler implementing the Google OAuth2 flow for Drive access.

    GET without a ``code`` query argument redirects the browser to Google's
    consent screen; the callback GET (with ``code``) exchanges the code for
    credentials, persists them base64-encoded in the configured token file,
    and redirects to /.
    '''

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        # self_redirect_uri should be similar to 'http://<host>/auth/'
        self_redirect_uri = self.request.full_url()
        idx = self_redirect_uri.index("auth/")
        self_redirect_uri = self_redirect_uri[0:(idx + len("auth/"))]

        # get_argument defaults to False (not None) so an empty ?code= value
        # is still treated as present.
        code = self.get_argument('code', False)
        if code is not False:
            # Callback leg: exchange the authorization code for tokens.
            user = yield self.get_authenticated_user(redirect_uri=self_redirect_uri, code=code)
            self.logger.debug("got user: " + repr(user))
            creds = self.make_credentials(user)
            tokfile = self.settings['token_file']
            tokdir = os.path.dirname(tokfile)
            if not os.path.exists(tokdir):
                os.makedirs(tokdir)
                # Owner-only access for the token directory. 0o700 instead of
                # the legacy 0700 literal: the latter is a syntax error on
                # Python 3, while 0o700 works on Python 2.6+ and 3.x alike.
                os.chmod(tokdir, 0o700)
            with open(tokfile, 'w') as gtok_file:
                gtok_file.write(base64.b64encode(creds.to_json()))
            self.redirect('/')
            return
        else:
            # First leg: send the user to Google's consent screen.
            scope = ['https://www.googleapis.com/auth/drive', 'email']
            extra_params = {
                'approval_prompt': 'auto',  # auto / force
                'access_type': 'offline',
                'include_granted_scopes': 'true'
            }
            yield self.authorize_redirect(redirect_uri=self_redirect_uri,
                                          client_id=self.settings['google_oauth']['key'],
                                          scope=scope,
                                          response_type='code',
                                          extra_params=extra_params)

    def make_credentials(self, user):
        '''Build an oauth2client OAuth2Credentials object from the token
        response dict returned by Google.'''
        token_expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=int(user['expires_in']))
        id_token = _extract_id_token(user['id_token'])
        credential = OAuth2Credentials(
            access_token=user['access_token'],
            client_id=self.settings['google_oauth']['key'],
            client_secret=self.settings['google_oauth']['secret'],
            refresh_token=user['refresh_token'],
            token_expiry=token_expiry,
            token_uri=GOOGLE_TOKEN_URI,
            user_agent=None,
            revoke_uri=GOOGLE_REVOKE_URI,
            id_token=id_token,
            token_response=user)
        return credential
| {
"content_hash": "942a1d409004db4c301b8c6b52882d28",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 39.9264705882353,
"alnum_prop": 0.5731123388581952,
"repo_name": "tanmaykm/pygdrive",
"id": "405cb5363b990da30defe1dca98faf364a849e68",
"size": "2715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygdrive/authhandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3085"
},
{
"name": "Python",
"bytes": "18104"
}
],
"symlink_target": ""
} |
import os
import sys
# Fail fast: this script relies on Python 3 (urllib.request, print()).
if sys.version_info[0] < 3: raise Exception("Python 3 or a more recent version is required.")
import pprint
import argparse
import urllib.request
from datetime import datetime
import shutil
from time import sleep
# ==============================================================================
# == ARGUMENTS =================================================================
# ==============================================================================

def addCommonFlags(parser):
    '''Attach the options shared by the "compile" and "runtime" subcommands.'''
    parser.add_argument(
        "compiler", choices=['msvc', 'gcc', 'clang'], default='msvc',
        help="compiler to use")
    parser.add_argument(
        "--debug", action="store_true", help="build in debug")
    parser.add_argument(
        "--catch", action="store_true", help="use Catch instead of doctest")
    parser.add_argument(
        "--disabled", action="store_true",
        help="DOCTEST_CONFIG_DISABLE / CATCH_CONFIG_DISABLE")
    parser.add_argument(
        "--fast", action="store_true",
        help="define the doctest/Catch fast config identifier")
    parser.add_argument(
        "--files", type=int, default=1,
        help="number of source files (besides the implementation)")
    parser.add_argument(
        "--tests", type=int, default=1, help="number of test cases per source file")
    parser.add_argument(
        "--checks", type=int, default=1, help="number of asserts per test case")
    parser.add_argument(
        "--asserts", choices=['normal', 'binary'], default="normal",
        help="<doctest> type of assert used - Catch: only normal")
# Top-level CLI: two subcommands sharing the common flags, each storing a
# mode-selector callable under args.func.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()

parser_c = subparsers.add_parser('compile', help='benchmark compile times')
addCommonFlags(parser_c)
parser_c.add_argument("--implement", action="store_true", help="implement the framework test runner")
parser_c.add_argument("--header", action="store_true", help="include the framework header everywhere")

parser_r = subparsers.add_parser('runtime', help='benchmark runtime')
addCommonFlags(parser_r)
parser_r.add_argument("--loop-iters", type=int, default=1000, help="loop N times all asserts in each test case")
parser_r.add_argument("--info", action="store_true", help="log the loop variable with INFO()")

# Renamed from compile()/runtime(): the old name `compile` shadowed the
# built-in compile() at module scope.
def _set_compile_mode(args): args.compile = True; args.runtime = False
def _set_runtime_mode(args): args.compile = False; args.runtime = True
parser_c.set_defaults(func=_set_compile_mode)
parser_r.set_defaults(func=_set_runtime_mode)

args = parser.parse_args()
args.func(args)

print("== PASSED OPTIONS TO BENCHMARK SCRIPT:")
pprint.pprint(vars(args), width = 1)
# ==============================================================================
# == SETUP ENVIRONMENT =========================================================
# ==============================================================================

# Catch release used when benchmarking against Catch.
catch_ver = "2.3.0"
catch_header = "catch." + catch_ver + ".hpp"

# Download the single-header Catch release once and cache it locally.
if not os.path.exists("catch." + catch_ver + ".hpp"):
    urllib.request.urlretrieve("https://github.com/catchorg/Catch2/releases/download/v" + catch_ver + "/catch.hpp", catch_header)

# folder with generated code
the_folder = 'project'

# Start from a clean slate: drop any project left over from a prior run.
if os.path.exists(the_folder):
    shutil.rmtree(the_folder)

# wait a bit or the script might fail...
# (presumably Windows briefly keeps the deleted tree locked -- TODO confirm)
sleep(2)

# create the folder
if not os.path.exists(the_folder):
    os.makedirs(the_folder)

# enter folder
os.chdir(the_folder);
# ==============================================================================
# == DO STUFF ==================================================================
# ==============================================================================

# Preprocessor #define block injected at the top of every generated file,
# based on the chosen framework and config flags.
defines = ""
if args.catch and args.disabled:
    defines += "#define CATCH_CONFIG_DISABLE\n"
if not args.catch and args.disabled:
    defines += "#define DOCTEST_CONFIG_DISABLE\n"
if args.catch and args.fast:
    defines += "#define CATCH_CONFIG_FAST_COMPILE\n"
if not args.catch and args.fast:
    defines += "#define DOCTEST_CONFIG_SUPER_FAST_ASSERTS\n"

# The define that turns main.cpp into the test-runner implementation file.
define_implement = "#define DOCTEST_CONFIG_IMPLEMENT\n"
if args.catch:
    define_implement = "#define CATCH_CONFIG_RUNNER\n"

# The assert line emitted into the generated test cases; runtime benchmarks
# compare the loop counter, compile benchmarks compare two locals.
macro = "    CHECK(a == b);\n"
if args.runtime:
    macro = "    CHECK(i == i);\n"
if not args.catch and args.asserts == "binary":
    macro = "    CHECK_EQ(a, b);\n"

# The framework header included by the generated files.
include = '#include "doctest.h"\n'
if args.catch:
    include = '#include "' + catch_header + '"\n'
# ==============================================================================
# == GENERATE SOURCE CODE ======================================================
# ==============================================================================

# Emit args.files .cpp files, each with args.tests test cases holding
# args.checks asserts apiece, plus a tiny anchor function f<i>() so the
# translation unit is never empty (main() sums the anchors).
for i in range(0, args.files):
    f = open(str(i) + '.cpp', 'w')
    if args.runtime or args.header:
        # Compile benchmarks may deliberately omit the header to measure
        # the cost of the header itself separately.
        f.write(defines)
        f.write(include)
    for t in range(0, args.tests):
        f.write('TEST_CASE("") {\n')
        f.write('    int a = 5;\n')
        f.write('    int b = 5;\n')
        if args.runtime and args.loop_iters > 0:
            f.write('    for(int i = 0; i < ' + str(args.loop_iters) + '; ++i) {\n')
        if args.runtime and args.info:
            f.write('        INFO(i);\n')
        for a in range(0, args.checks):
            if args.runtime and args.loop_iters > 0:
                f.write('    ')
            f.write(macro)
        if args.runtime and args.loop_iters > 0:
            f.write('    }\n')
        f.write('}\n\n')
    f.write('int f' + str(i) + '() { return ' + str(i) + '; }\n\n')
    f.close()

# the main file
f = open('main.cpp', 'w')
if args.runtime or args.implement or args.header:
    f.write(defines)
    f.write(define_implement)
    f.write(include)
f.write('int main(int argc, char** argv) {\n')
if args.runtime or args.implement or args.header:
    if not args.catch: f.write('    int res = doctest::Context(argc, argv).run();\n')
    else:              f.write('    int res = Catch::Session().run(argc, argv);\n')
else:
    # No framework at all: just accumulate the anchor functions.
    f.write('    int res = 0;\n')
for i in range(0, args.files):
    f.write('    int f' + str(i) + '(); res += f' + str(i) + '();\n')
f.write('    return res;\n}\n')
f.close()

# the cmake file
f = open('CMakeLists.txt', 'w')
f.write('cmake_minimum_required(VERSION 2.8)\n\n')
f.write('project(bench)\n\n')
f.write('if(NOT MSVC)\n')
f.write('set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")\n')
f.write('endif()\n\n')
if not args.catch: f.write('include_directories("../../../doctest/")\n\n')
else:              f.write('include_directories("../")\n\n')
f.write('add_executable(bench main.cpp\n')
for i in range(0, args.files):
    f.write('    ' + str(i) + '.cpp\n')
f.write(')\n')
f.close()
# ==============================================================================
# == INVOKE CMAKE ==============================================================
# ==============================================================================

# Extra -D flags selecting the compiler for non-MSVC builds (warnings off).
compiler = ""
if args.compiler == 'clang':
    compiler = " -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_CXX_FLAGS=-w"
if args.compiler == 'gcc':
    compiler = " -DCMAKE_CXX_COMPILER=g++ -DCMAKE_CXX_FLAGS=-w"

# setup cmake command
cmake_command = 'cmake . -G "Visual Studio 15 Win64"' # MSVC 2017
if args.compiler != 'msvc':
    cmake_command = 'cmake . -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE=' + ('Debug' if args.debug else 'Release')
if os.name != "nt":
    cmake_command = 'cmake . -DCMAKE_BUILD_TYPE=' + ('Debug' if args.debug else 'Release')

os.system(cmake_command + compiler)

# ==============================================================================
# == BUILD PROJECT =============================================================
# ==============================================================================

# MSVC is a multi-config generator: the configuration is chosen at build
# time (--config), not at configure time.
the_config = ''
if args.compiler == 'msvc':
    if args.debug: the_config = ' --config Debug'
    else:          the_config = ' --config Release'

# build it (timed; only reported for compile benchmarks)
start = datetime.now()
os.system('cmake --build .' + the_config)
end = datetime.now()

if not args.runtime:
    print("Time running compiler (+ linker) in seconds: " + str((end - start).total_seconds()))

# ==============================================================================
# == RUN PROJECT ===============================================================
# ==============================================================================

# Time the produced binary only in runtime mode.
if args.runtime:
    start = datetime.now()
    if args.compiler == 'msvc':
        os.system(('Debug' if args.debug else 'Release') + '\\bench.exe')
    elif os.name == "nt":
        os.system('bench.exe')
    else:
        os.system('./bench')
    end = datetime.now()
    print("Time running the tests in seconds: " + str((end - start).total_seconds()))

# leave folder
os.chdir("../");
| {
"content_hash": "75947d10353748cc73bae7843b2d8754",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 129,
"avg_line_length": 38.242553191489364,
"alnum_prop": 0.5115166351396462,
"repo_name": "onqtam/doctest",
"id": "0aaa981a80d9ccb2703df659b16e510e4e2b3554",
"size": "9007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bench/bench.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "705"
},
{
"name": "C",
"bytes": "115"
},
{
"name": "C++",
"bytes": "548154"
},
{
"name": "CMake",
"bytes": "28009"
},
{
"name": "Meson",
"bytes": "156"
},
{
"name": "Python",
"bytes": "19666"
}
],
"symlink_target": ""
} |
"""Utility functions for Process invocation and management."""
from __future__ import unicode_literals
import os
import subprocess
import sys
from reviewbot.utils.log import get_logger
logger = get_logger(__name__)
def execute(command,
            env=None,
            split_lines=False,
            ignore_errors=False,
            extra_ignore_errors=(),
            translate_newlines=True,
            with_errors=True,
            return_errors=False,
            none_on_ignored_error=False):
    """Execute a command and return the output.

    Args:
        command (list of unicode):
            The command to run.

        env (dict, optional):
            The environment variables to use when running the process.

        split_lines (bool, optional):
            Whether to return the output as a list (split on newlines) or a
            single string.

        ignore_errors (bool, optional):
            Whether to ignore non-zero return codes from the command.

        extra_ignore_errors (tuple of int, optional):
            Process return codes to ignore.

        translate_newlines (bool, optional):
            Whether to convert platform-specific newlines (such as \\r\\n) to
            the regular newline (\\n) character.

        with_errors (bool, optional):
            Whether the stderr output should be merged in with the stdout
            output or just ignored.

        return_errors (bool, optional):
            Whether to return the content of the stderr stream. If set, this
            argument takes precedence over the ``with_errors`` argument.

        none_on_ignored_error (bool, optional):
            Whether to return ``None`` if there was an ignored error (instead
            of the process output).

    Returns:
        object:
        This returns a single value or 2-tuple, depending on the arguments.

        If ``return_errors`` is ``True``, this will return the standard output
        and standard errors as strings in a tuple. Otherwise, this will just
        return the standard output as a string.

        If ``split_lines`` is ``True``, those strings will instead be lists
        of lines (preserving newlines).

        All resulting strings will be Unicode.
    """
    if isinstance(command, list):
        logger.debug(subprocess.list2cmdline(command))
    else:
        logger.debug(command)

    if env:
        # NOTE(review): values from os.environ overwrite caller-supplied ones
        # here (the real environment wins) -- confirm this is intentional.
        env.update(os.environ)
    else:
        env = os.environ.copy()

    # Force a predictable locale so tool output can be parsed reliably.
    env['LC_ALL'] = 'en_US.UTF-8'
    env['LANGUAGE'] = 'en_US.UTF-8'

    if with_errors and not return_errors:
        errors_output = subprocess.STDOUT
    else:
        errors_output = subprocess.PIPE

    if sys.platform.startswith('win'):
        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             universal_newlines=translate_newlines,
                             env=env)
    else:
        # close_fds is not supported together with stdout/stderr redirection
        # on old Windows Pythons, hence the platform split above.
        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             close_fds=True,
                             universal_newlines=translate_newlines,
                             env=env)

    data, errors = p.communicate()

    if isinstance(data, bytes):
        data = data.decode('utf-8')

    if split_lines:
        data = data.splitlines(True)

    if return_errors:
        # Bug fix: stderr was previously returned undecoded (bytes on
        # Python 3), contradicting the documented contract that all
        # resulting strings are Unicode.
        if isinstance(errors, bytes):
            errors = errors.decode('utf-8')

        if split_lines:
            errors = errors.splitlines(True)
    else:
        errors = None

    rc = p.wait()

    if rc and not ignore_errors and rc not in extra_ignore_errors:
        raise Exception('Failed to execute command: %s\n%s' % (command, data))

    if rc and none_on_ignored_error:
        data = None

    if return_errors:
        return data, errors
    else:
        return data
def is_exe_in_path(name, cache={}):
    """Check whether an executable is in the user's search path.

    If the provided filename is an absolute path, it will be checked
    directly without looking in the search path.

    Version Changed:
        3.0:
        Added the ``cache`` parameter.

    Args:
        name (unicode):
            The name of the executable, without any platform-specific
            executable extension. The extension will be appended if necessary.

        cache (dict, optional):
            A result cache, to avoid repeated lookups.

            This will store the paths to any files that are found (or ``None``
            if not found).

            By default, the cache is shared across all calls. A custom cache
            can be provided instead.

    Returns:
        boolean:
        True if the executable can be found in the execution path.
    """
    if sys.platform == 'win32' and not name.endswith('.exe'):
        name += '.exe'

    if name in cache:
        # Bug fix: this previously returned the cached path string (or
        # None) rather than a boolean, so cached and uncached calls had
        # different return types despite the documented contract.
        return cache[name] is not None

    path = None

    if os.path.isabs(name):
        # Absolute paths bypass the PATH search entirely.
        if os.path.exists(name):
            path = name
    else:
        for dirname in os.environ['PATH'].split(os.pathsep):
            temp_path = os.path.abspath(os.path.join(dirname, name))

            if os.path.exists(temp_path):
                path = temp_path
                break

    cache[name] = path

    return path is not None
| {
"content_hash": "7c2ba2ded4811d632719c58e4d9a2a57",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 29.52972972972973,
"alnum_prop": 0.5731283177741168,
"repo_name": "reviewboard/ReviewBot",
"id": "f03c7190585ab7fd3a490aa71eb24b65c62dca39",
"size": "5463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/reviewbot/utils/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8061"
},
{
"name": "HTML",
"bytes": "1522"
},
{
"name": "JavaScript",
"bytes": "21446"
},
{
"name": "Less",
"bytes": "1115"
},
{
"name": "Python",
"bytes": "741709"
},
{
"name": "Shell",
"bytes": "4439"
}
],
"symlink_target": ""
} |
import os

# Also echo log records to stdout (in addition to the configured handlers).
LOG_STDOUT = True
# Maximum number of rows to be returned from Solr
CITATION_HELPER_MAX_HITS = 10000
# Maximum number of bibcodes in input (excess will be ignored)
CITATION_HELPER_MAX_INPUT = 500
# Maximum number allowed in submitted bibcodes
CITATION_HELPER_MAX_SUBMITTED = 100
# Bibcode input list will be split into chunks of this size
CITATION_HELPER_CHUNK_SIZE = 100
# The maximum number of suggestions returned by the service
CITATION_HELPER_NUMBER_SUGGESTIONS = 10
# Minimal score for papers to be included in results
CITATION_HELPER_THRESHOLD_FREQUENCY = 1
# Where to query Solr
CITATION_HELPER_SOLR_PATH = 'https://api.adsabs.harvard.edu/v1/search/query'
# In what environment are we?  Defaults to 'staging' when the ENVIRONMENT
# variable is unset; always lower-cased for consistent comparisons.
ENVIRONMENT = os.getenv('ENVIRONMENT', 'staging').lower()
# Config for logging (dictConfig schema, version 1).  Identical records go
# to a per-environment timed-rotating file under /tmp and to the console;
# the root logger ('') fans out to both handlers at INFO level.
CITATION_HELPER_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '%(levelname)s\t%(process)d '
                      '[%(asctime)s]:\t%(message)s',
            'datefmt': '%m/%d/%Y %H:%M:%S',
        }
    },
    'handlers': {
        'file': {
            'formatter': 'default',
            'level': 'INFO',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': '/tmp/citation_helper_service.app.{}.log'.format(ENVIRONMENT),
        },
        'console': {
            'formatter': 'default',
            'level': 'INFO',
            'class': 'logging.StreamHandler'
        },
    },
    'loggers': {
        '': {
            'handlers': ['file', 'console'],
            'level': 'INFO',
            'propagate': True,
        },
    },
}
# Define the autodiscovery endpoint
DISCOVERER_PUBLISH_ENDPOINT = '/resources'
# Advertise its own route within DISCOVERER_PUBLISH_ENDPOINT
DISCOVERER_SELF_PUBLISH = False
# must be here for adsmutils to override it using env vars
# but if left empty (resolving to False) it won't be used
SERVICE_TOKEN = None
| {
"content_hash": "85c20bb7af5c88e42e00690b121b7df4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 86,
"avg_line_length": 33.724137931034484,
"alnum_prop": 0.6273006134969326,
"repo_name": "adsabs/citation_helper_service",
"id": "b278d23db00c081e6543bdf818333daf2e0c4f07",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23214"
}
],
"symlink_target": ""
} |
import numpy as np
def _summarize_peaks(peaks):
"""
merge peaks position if closer than 10
"""
previous = peaks[0]
new_peaks = [previous]
for pos in peaks:
if pos > previous + 10:
new_peaks.add(pos)
previous = pos
return new_peaks
def find_mature(x, y, win=10):
    """Window approach to find hills in the expression profile.

    Args:
        x: position values. NOTE(review): unused after the range() fix
           below; kept for backward compatibility -- confirm intended use.
        y: expression values, indexed by position.
        win (int): window step size.

    Returns:
        list: merged peak positions (empty when no hill is found). The
        original implementation computed the peaks but fell off the end
        without returning them.
    """
    previous = min(y)
    peaks = []
    # Bug fix: the original built range(x, y, win) from the two sequences
    # themselves (TypeError); step over the indices of y instead.
    for pos in range(0, len(y), win):
        # A "hill" is a sampled value more than 10x the running baseline.
        if y[pos] > previous * 10:
            previous = y[pos]
            # Bug fix: peaks is a list, so use append() (not set.add()).
            peaks.append(pos)
    if not peaks:
        return []
    return _summarize_peaks(peaks)
| {
"content_hash": "7e731d9a41950744132b9fa0c9d62c57",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 22.178571428571427,
"alnum_prop": 0.5603864734299517,
"repo_name": "lpantano/seqcluster",
"id": "c22d20f8abedcb296c400a8d3950f4fa9fede511",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seqcluster/function/peakdetect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42960"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "HTML",
"bytes": "1038435"
},
{
"name": "JavaScript",
"bytes": "4712"
},
{
"name": "Mathematica",
"bytes": "449972"
},
{
"name": "Python",
"bytes": "268720"
},
{
"name": "R",
"bytes": "1931"
},
{
"name": "Shell",
"bytes": "6657"
}
],
"symlink_target": ""
} |
from django.core.management.base import NoArgsCommand, CommandError
from atados_core.models import Nonprofit, Address, City
from optparse import make_option
import csv
import time
class Command(NoArgsCommand):
    """Recompute and persist the cached volunteer count of every nonprofit."""

    help = 'Update nonprofit volunteer count'

    def handle_noargs(self, **options):
        nonprofits = Nonprofit.objects.all()
        for n in nonprofits:
            # Bug fix: the original called the returned count again
            # (`get_volunteers_numbers()()`), which raises TypeError unless
            # the method returns a callable -- presumably a typo.
            n.volunteer_count = n.get_volunteers_numbers()
            n.save()
| {
"content_hash": "5c49eeed753869cc1a0552447b1a3cd8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 28.733333333333334,
"alnum_prop": 0.7470997679814385,
"repo_name": "atados/api",
"id": "c7b87964a3087e5f0b70719f04f040f4a0510b81",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atados_core/management/commands/update_nonprofits_volunteer_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31943"
},
{
"name": "HTML",
"bytes": "138142"
},
{
"name": "JavaScript",
"bytes": "5492"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Python",
"bytes": "394268"
},
{
"name": "Shell",
"bytes": "1060"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
bootstrap client session
"""
import frappe
import frappe.defaults
import frappe.desk.desk_page
from frappe.utils import get_gravatar, get_url
from frappe.desk.form.load import get_meta_bundle
from frappe.utils.change_log import get_versions
def get_bootinfo():
    """build and return boot info"""
    frappe.set_user_lang(frappe.session.user)
    bootinfo = frappe._dict()
    hooks = frappe.get_hooks()
    doclist = []

    # user
    get_user(bootinfo)

    # system info
    bootinfo['sysdefaults'] = frappe.defaults.get_defaults()
    bootinfo['server_date'] = frappe.utils.nowdate()

    # Logged-in users additionally get the fullname map and their session id.
    if frappe.session['user'] != 'Guest':
        bootinfo['user_info'] = get_fullnames()
        bootinfo['sid'] = frappe.session['sid'];

    # home page
    bootinfo.modules = {}
    # Collect desktop module definitions from every installed app; apps
    # without a config.desktop.get_data are skipped silently.
    for app in frappe.get_installed_apps():
        try:
            bootinfo.modules.update(frappe.get_attr(app + ".config.desktop.get_data")() or {})
        except ImportError:
            pass
        except AttributeError:
            pass

    bootinfo.module_app = frappe.local.module_app
    bootinfo.hidden_modules = frappe.db.get_global("hidden_modules")
    bootinfo.doctype_icons = dict(frappe.db.sql("""select name, icon from
        tabDocType where ifnull(icon,'')!=''"""))
    bootinfo.single_types = frappe.db.sql_list("""select name from tabDocType where ifnull(issingle,0)=1""")
    add_home_page(bootinfo, doclist)
    bootinfo.page_info = get_allowed_pages()
    load_translations(bootinfo)
    add_timezone_info(bootinfo)
    load_conf_settings(bootinfo)
    load_print(bootinfo, doclist)
    doclist.extend(get_meta_bundle("Page"))

    # ipinfo
    if frappe.session['data'].get('ipinfo'):
        bootinfo['ipinfo'] = frappe.session['data']['ipinfo']

    # add docs
    bootinfo['docs'] = doclist

    # Let installed apps post-process the payload via their boot_session hooks.
    for method in hooks.boot_session or []:
        frappe.get_attr(method)(bootinfo)

    if bootinfo.lang:
        # Python 2 API: coerce the language code to a unicode string.
        bootinfo.lang = unicode(bootinfo.lang)
    bootinfo['versions'] = {k: v['version'] for k, v in get_versions().items()}

    bootinfo.error_report_email = frappe.get_hooks("error_report_email")
    bootinfo.calendars = sorted(frappe.get_hooks("calendars"))

    return bootinfo
def load_conf_settings(bootinfo):
    """Copy whitelisted site-config values onto the boot payload."""
    from frappe import conf

    # Upload limit falls back to 5 MB when the site config has none.
    bootinfo.max_file_size = conf.get('max_file_size') or 5242880

    for key in ('developer_mode',):
        if key in conf:
            bootinfo[key] = conf.get(key)
def get_allowed_pages():
    """Return {page name: {"modified", "title"}} for every Page the current
    user's roles may access, plus pages with no role restriction at all."""
    roles = frappe.get_roles()
    page_info = {}

    # Role-restricted pages: keep those matching one of the user's roles.
    # The IN-clause placeholders are generated to match len(roles).
    for p in frappe.db.sql("""select distinct
        tabPage.name, tabPage.modified, tabPage.title
        from `tabPage Role`, `tabPage`
        where `tabPage Role`.role in (%s)
            and `tabPage Role`.parent = `tabPage`.name""" % ', '.join(['%s']*len(roles)),
            roles, as_dict=True):
        page_info[p.name] = {"modified":p.modified, "title":p.title}

    # pages where role is not set are also allowed
    for p in frappe.db.sql("""select name, modified, title
        from `tabPage` where
            (select count(*) from `tabPage Role`
                where `tabPage Role`.parent=tabPage.name) = 0""", as_dict=1):
        page_info[p.name] = {"modified":p.modified, "title":p.title}

    return page_info
def load_translations(bootinfo):
    """Attach the boot-time translation dictionary for non-English sessions."""
    if frappe.local.lang == 'en':
        return

    messages = frappe.get_lang_dict("boot")
    bootinfo["lang"] = frappe.lang

    # Report names are shown verbatim in the UI, so translate each one too.
    for report_name in bootinfo.user.all_reports:
        messages[report_name] = frappe._(report_name)

    bootinfo["__messages"] = messages
def get_fullnames():
    """map of user fullnames"""
    # Build {user name: row} for all enabled, non-website users; fullname is
    # first and last name joined with a single space when both exist.
    ret = frappe.db.sql("""select name,
        concat(ifnull(first_name, ''),
            if(ifnull(last_name, '')!='', ' ', ''), ifnull(last_name, '')) as fullname,
            user_image as image, gender, email
        from tabUser where ifnull(enabled, 0)=1 and user_type!="Website User" """, as_dict=1)

    d = {}
    for r in ret:
        if not r.image:
            # NOTE(review): get_gravatar() is called without an email
            # argument here -- verify against frappe.utils.get_gravatar's
            # signature; it likely expects the user's email.
            r.image = get_gravatar()
        d[r.name] = r

    return d
def get_user(bootinfo):
    """get user info"""
    # Attach the fully loaded frappe user object for the session user.
    bootinfo.user = frappe.get_user().load_user()
def add_home_page(bootinfo, docs):
    """load home page"""
    # Guests get no desktop home page.
    if frappe.session.user=="Guest":
        return
    home_page = frappe.db.get_default("desktop:home_page")
    try:
        page = frappe.desk.desk_page.get(home_page)
    except (frappe.DoesNotExistError, frappe.PermissionError):
        # Discard the error message queued by the failed lookup, then fall
        # back to the generic desktop page.
        frappe.message_log.pop()
        page = frappe.desk.desk_page.get('desktop')
    bootinfo['home_page'] = page.name
    docs.append(page)
def add_timezone_info(bootinfo):
    """Attach momentjs timezone data for the system's configured time zone."""
    import frappe.utils.momentjs

    system_tz = bootinfo.sysdefaults.get("time_zone")
    bootinfo.timezone_info = {"zones": {}, "rules": {}, "links": {}}
    frappe.utils.momentjs.update(system_tz, bootinfo.timezone_info)
def load_print(bootinfo, doclist):
    """Append the Print Settings single to the boot doclist and attach CSS."""
    settings = frappe.db.get_singles_dict("Print Settings")
    # The ':' prefix marks a singles dict (not a full document) to the client.
    settings.doctype = ":Print Settings"
    doclist.append(settings)
    load_print_css(bootinfo, settings)
def load_print_css(bootinfo, print_settings):
    # Render the print stylesheet for the configured style ("Modern" when
    # unset) in legacy mode and attach it to the boot payload.
    bootinfo.print_css = frappe.get_attr("frappe.templates.pages.print.get_print_style")(print_settings.print_style or "Modern", for_legacy=True)
| {
"content_hash": "704e89b093acbb293e0a18f96f4cf3c4",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 142,
"avg_line_length": 29.925465838509318,
"alnum_prop": 0.7027812370278124,
"repo_name": "mbauskar/omnitech-demo-frappe",
"id": "6a88bbc0915ae6cf8f3e6954002f2a721a75c55e",
"size": "4919",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "frappe/boot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246690"
},
{
"name": "HTML",
"bytes": "140316"
},
{
"name": "JavaScript",
"bytes": "1042712"
},
{
"name": "Python",
"bytes": "1133822"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
import unittest
from image_to_data import *
class TestConstructor(unittest.TestCase):
def setUp(self):
self.td = {}
self.td[0] = {'source':'image_to_data_test/templates/a.jpg', 'output_data':'a'}
self.td[1] = {'source':'image_to_data_test/templates/d.jpg', 'output_data':'d'}
self.td[2] = {'source':'image_to_data_test/templates/e.jpg', 'output_data':'e'}
self.td[3] = {'source':'image_to_data_test/templates/k.jpg', 'output_data':'k'}
self.td[4] = {'source':'image_to_data_test/templates/m.jpg', 'output_data':'m'}
self.td[5] = {'source':'image_to_data_test/templates/n.jpg', 'output_data':'n'}
self.td[5] = {'source':'image_to_data_test/templates/r.jpg', 'output_data':'r'}
self.td[6] = {'source':'image_to_data_test/templates/t.jpg', 'output_data':'t'}
self.td[7] = {'source':'image_to_data_test/templates/u.jpg', 'output_data':'u'}
self.td[8] = {'source':'image_to_data_test/templates/w.jpg', 'output_data':'w'}
self.td[9] = {'source':'image_to_data_test/templates/y.jpg', 'output_data':'y'}
def test_construction(self):
i2d = ImageToData(self.td)
self.assertIn([640,480],i2d.ac.get('crop','resolutions'))
self.assertIn([800,600],i2d.ac.get('crop','resolutions'))
self.assertIn([1920,1200],i2d.ac.get('crop','resolutions'))
self.assertNotIn([53,53],i2d.ac.get('crop','resolutions'))
def test_custom_resolutions(self):
i2d = ImageToData(self.td, resolutions = [[53,53]])
self.assertIn([53,53],i2d.ac.get('crop','resolutions'))
self.assertNotIn([640,480],i2d.ac.get('crop','resolutions'))
class TestGetData(unittest.TestCase):
    """Placeholder for ImageToData.get_data tests (not yet implemented)."""

    def setUp(self):
        pass

    def test_function(self):
        pass
# Bug fix: these skeleton test classes were declared with empty method
# bodies, which is a SyntaxError and made the whole module unimportable.
# `pass` bodies keep them as no-op placeholders so the implemented tests
# above can actually run.

class Test_PrepareImage(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_PrepareTemplate(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_PrepareTemplates(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_Identify(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_PcntRegionToROI(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_SourceToImage(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_AdjustForRules(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_AdjustForMethod(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_ConvertToRGB(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_ConvertToGrayscale(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_ConvertToAverageColor(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_GetScreenshot(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

# NOTE(review): Test_ConvertToRGB is declared twice in the original file;
# both are kept to preserve declaration order, but this second definition
# shadows the first -- one of them probably needs renaming.
class Test_ConvertToRGB(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_CropToResolution(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_CropTo4To3Aspect(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass

class Test_ShrinkToHeight(unittest.TestCase):
    def setUp(self):
        pass

    def test_function(self):
        pass
if __name__ == "__main__":
    # Swallow the SystemExit that unittest.main() raises on completion so
    # the interpreter session (and any code after it) is not terminated.
    try: unittest.main()
    except SystemExit: pass
##
## ''' Create Templates and data for testing '''
##
## ''' Create Regions for all letter tiles '''
## letter_regions = {}
## height = 600
## width = 800
## tiles_top = 309
## tiles_left = 302
## step = 50
## letter_regions = flexframe.FlexFrame('y', 'x')
## for y in range(tiles_top, tiles_top + step*3 + 1, step):
## for x in range(tiles_left, tiles_left + step*3 + 1, step):
## letter_region = {'top':100*y/height,
## 'left':100*x/width,
## 'bottom':100*(y+step)/height,
## 'right':100*(x+step)/width}
## letter_regions.place(letter_region, y, x)
##
## itd = ImageToData(templates_and_data = td)
##
##
## print('Test grayscale letter matching ********************************')
## indexed_letters = itd.get_data('image_to_data_test/stun.jpg',
## method = 'grayscale correlation',
## pcnt_regions = letter_regions,
## crop_to_resolution = True,
## crop_to_4to3_aspect = True,
## shrink_to_height = 480)
## print('stun.jpg: ', indexed_letters)
## print('Test rgb letter matching ********************************')
## letters = itd.get_data('image_to_data_test/stun.jpg',
## method = 'rgb correlation',
## regions = letter_regions,
## crop_to_resolution = True,
## crop_to_4to3_aspect = True,
## shrink_to_height = 480)
## print(letters)
## print('Test image loading and processing ****************')
## ''' create list of all source types '''
## sources = []
## sources.append('image_to_data_test/sample_for_load_borders.bmp') #image_to_data_test path
## sources.append(cvLoadImage('image_to_data_test/stun.jpg')) #image_to_data_test image
## sources.append('image_to_data') #image_to_data_test title
## ''' create list of all method types '''
## methods = []
## methods.append('rgb correlation')
## methods.append('grayscale correlation')
## methods.append('average color')
## ''' create list of values for each rule '''
## crop_to_resolution_values = []
## crop_to_4to3_aspect_values = []
## shrink_to_height_values = []
## crop_to_resolution_values.append(False)
## crop_to_resolution_values.append(True)
## crop_to_4to3_aspect_values.append(False)
## crop_to_4to3_aspect_values.append(True)
## shrink_to_height_values.append(None)
## shrink_to_height_values.append(320)
## ''' image_to_data_test all combinations of values '''
## for source in sources:
## for method in methods:
## for crop_to_resolution in crop_to_resolution_values:
## for crop_to_4to3_aspect in crop_to_4to3_aspect_values:
## for shrink_to_height in shrink_to_height_values:
## title = 'iplimage' if isinstance(source, IplImage) else os.path.split(source)[1]
## title += ' ** ' + method
## title += ' ** crop_res ' + str(crop_to_resolution)
## title += ' ** crop_aspect ' + str(crop_to_4to3_aspect)
## title += ' ** shrink_to ' + str(shrink_to_height)
## image = itd._prepare_image(source, method,
## crop_to_resolution,
## crop_to_4to3_aspect,
## shrink_to_height)
## print('_prepare_image Test: ' + title)
## _debug_display(image, title)
## key = input('enter to continue q to quit')
## if key in ('q','Q'):
## return
| {
"content_hash": "023ebcfe765a09dad61ab059d29547e0",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 106,
"avg_line_length": 35.78365384615385,
"alnum_prop": 0.5640198844551928,
"repo_name": "kobejohn/BookwormUtility",
"id": "fdaf46c0dc94abef04c0f68c9ee11b03beb42f08",
"size": "7443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/image_to_data_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123897"
}
],
"symlink_target": ""
} |
'''
Created on 2. okt. 2010

@author: Yngve
'''
"""Report Renego patched/unpatched based on Alexa ranking only sites with EV certificates"""

import sys,os,subprocess,time,os.path

# Make the project root importable before pulling in the probedb packages.
sys.path.insert(1, os.path.join(".."))

import libinit

from optparse import OptionParser
import probedb.standalone
import probedb.probedata2.models as ProbeData
import probedb.resultdb2.models as Results

options_config = OptionParser()

options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
options_config.add_option("--threads", action="store", type="int", dest="threads", default=20)
options_config.add_option("--id", action="store", type="int", dest="run_id", default=0)
options_config.add_option("--verbose", action="store_true", dest="verbose")

(options, args) = options_config.parse_args()

# The report only makes sense for a specific probe run (--id).
if options.run_id:
    run = ProbeData.ProbeRun.objects.get(id = options.run_id)
    main_result_list = Results.ResultSummaryList.objects.filter(part_of_run__id=run.id)[0]

    # EV-certificate hosts that support secure renegotiation (patched).
    patched = main_result_list.GetAnalyze(
        filter = {Results.ResultSummaryList.QUERY_CONDITION:[Results.ResultCondition.RESULTC_RENEGO, Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
        summaries = {"hosts":[Results.ResultSummaryList.RESULT_HOSTS]}
    )

    # EV-certificate hosts that are unpatched yet still perform renegotiation.
    unpatched_renego = main_result_list.GetAnalyze(
        filter = {Results.ResultSummaryList.QUERY_CONDITION:[Results.ResultCondition.RESULTC_NONRENEGO,Results.ResultCondition.RESULTC_PERFORM_RENEGO, Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
        summaries = {"hosts":[Results.ResultSummaryList.RESULT_HOSTS]}
    )

    # All EV-certificate hosts, patched or not.
    # NOTE(review): `all' shadows the builtin of the same name from here on.
    all = main_result_list.GetAnalyze(
        filter = {Results.ResultSummaryList.QUERY_CONDITION:[Results.ResultCondition.RESULTC_EXTENDED_VALIDATION_CERT]},
        summaries = {"hosts":[Results.ResultSummaryList.RESULT_HOSTS]}
    )

    # Per-Alexa-rating counters: {rating: {"patched", "total", "unpatched_renego"}}.
    summary = {}

    for (update_field, hostlist) in [("total", all),("patched",patched),("unpatched_renego", unpatched_renego)]:
        for x in hostlist["hosts"]:
            # Sites without an Alexa rating (<= 0) are excluded.
            if x.servername.alexa_rating > 0:
                summary.setdefault(x.servername.alexa_rating, {"patched":0, "total":0, "unpatched_renego":0})[update_field]+=1

    total_patched = 0
    total = 0
    total_renego = 0

    import csv
    # NOTE(review): `file' shadows the Python 2 builtin; "wb" is the
    # Python 2 csv convention for output files.
    file = csv.writer(open("alexa_ev_renego_rating.csv","wb"))

    file.writerow(["ranking","site patched", "site total","total patched","total", "patched percent", "unpatched renego", "total unpatched renego", "unpatched renego percent"])

    # Walk ratings in ascending order, emitting cumulative running totals.
    for x,y in sorted(summary.iteritems()):
        total += y["total"]
        total_patched += y["patched"]
        total_renego += y["unpatched_renego"]
        file.writerow([x, y["patched"], y["total"],
                    total_patched, total, ("%.2f%%" % ((float(total_patched)/float(total))*100.0 if total else 0,)),
                    y["unpatched_renego"], total_renego,("%.2f%%" % ((float(total_renego)/float(total-total_patched))*100.0 if total-total_patched else 0,)),
                    ])
| {
"content_hash": "b25b8779666d986b913f730a5943e754",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 206,
"avg_line_length": 40.82857142857143,
"alnum_prop": 0.7200839748075577,
"repo_name": "operasoftware/tlsprober",
"id": "897634e2fce87b3c60485c2c3b3c83652527f9e1",
"size": "3467",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "analyze/alexa_ev_analyze.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "21613"
},
{
"name": "Python",
"bytes": "457708"
},
{
"name": "Shell",
"bytes": "6332"
}
],
"symlink_target": ""
} |
#
# sources.py
#
# Convert source code comments to multi-line blocks (library file).
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This library file contains definitions of classes needed to decompose C
# source code files into a series of multi-line `blocks'. There are two
# kinds of blocks.
#
# - Normal blocks, which contain source code or ordinary comments.
#
# - Documentation blocks, which have restricted formatting, and whose text
# always start with a documentation markup tag like `<Function>',
# `<Type>', etc.
#
# The routines to process the content of documentation blocks are contained
# in file `content.py'; the classes and methods found here only deal with
# text parsing and basic documentation block extraction.
#
import fileinput, re, sys, os, string
################################################################
##
## SOURCE BLOCK FORMAT CLASS
##
## A simple class containing compiled regular expressions to detect
## potential documentation format block comments within C source code.
##
## The `column' pattern must contain a group to `unbox' the content of
## documentation comment blocks.
##
## Later on, paragraphs are converted to long lines, which simplifies the
## regular expressions that act upon the text.
##
class SourceBlockFormat:
    """Compiled regular expressions describing one layout of documentation
    comment blocks (opening line, interior column line, closing line)."""

    def __init__(self, id, start, column, end):
        """Create a block pattern, used to recognize special documentation
        blocks.  All three patterns are compiled in VERBOSE mode."""
        self.id = id
        self.start, self.column, self.end = (
            re.compile(pattern, re.VERBOSE)
            for pattern in (start, column, end))
#
# Format 1 documentation comment blocks.
#
# /************************************/ (at least 2 asterisks)
# /* */
# /* */
# /* */
# /************************************/ (at least 2 asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# Format 2 documentation comment blocks.
#
# /************************************ (at least 2 asterisks)
# *
# * (1 asterisk)
# *
# */ (1 or more asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# The list of supported documentation block formats. We could add new ones
# quite easily.
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# The following regular expressions correspond to markup tags within the
# documentation comment blocks. They are equivalent despite their different
# syntax.
#
# A markup tag consists of letters or character `-', to be found in group 1.
#
# Notice that a markup tag _must_ begin a new paragraph.
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# The list of supported markup tags. We could add new ones quite easily.
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# A regular expression to detect a cross reference, after markup tags have
# been stripped off.
#
# Two syntax forms are supported:
#
# @<name>
# @<name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries.
#
# Example: @foo[bar]
#
re_crossref = re.compile( r"""
@
(?P<name>(?:\w|-)+
(?:\[(?:\w|-)+\])?)
(?P<rest>.*)
""", re.VERBOSE )
#
# Two regular expressions to detect italic and bold markup, respectively.
# Group 1 is the markup, group 2 the rest of the line.
#
# Note that the markup is limited to words consisting of letters, digits,
# the characters `_' and `-', or an apostrophe (but not as the first
# character).
#
re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
#
# This regular expression code to identify an URL has been taken from
#
# http://mail.python.org/pipermail/tutor/2002-September/017228.html
#
# (with slight modifications).
#
urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
ltrs = r'\w'
gunk = r'/#~:.?+=&%@!\-'
punc = r'.:?\-'
any = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
'gunk' : gunk,
'punc' : punc }
url = r"""
(
\b # start at word boundary
%(urls)s : # need resource and a colon
[%(any)s] +? # followed by one or more of any valid
# character, but be conservative and
# take only what you need to...
(?= # [look-ahead non-consumptive assertion]
[%(punc)s]* # either 0 or more punctuation
(?: # [non-grouping parentheses]
[^%(any)s] | $ # followed by a non-url char
# or end of the string
)
)
)
""" % {'urls' : urls,
'any' : any,
'punc' : punc }
re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
#
# A regular expression that stops collection of comments for the current
# block.
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
#
# A regular expression to find possible C identifiers while outputting
# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
# the prefix, group 2 the identifier -- since we scan lines from left to
# right, sequentially splitting the source code into prefix and identifier
# is fully sufficient for our purposes.
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# A regular expression that matches a list of reserved C source keywords.
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## There are two important fields in a `SourceBlock' object.
##
## self.lines
## A list of text lines for the corresponding block.
##
## self.content
## For documentation comment blocks only, this is the block content
## that has been `unboxed' from its decoration. This is `None' for all
## other blocks (i.e., sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
    """One parsed block of a C source file: either plain source/comment
    lines, or an `unboxed' documentation comment whose text starts with a
    markup tag (`<Function>', `@func:', ...), in which case `self.content'
    holds the unboxed lines."""

    def __init__( self, processor, filename, lineno, lines ):
        self.processor = processor
        self.filename  = filename
        self.lineno    = lineno
        self.lines     = lines[:]
        self.format    = processor.format
        self.content   = []

        # No recognized comment format means this is ordinary source code.
        if self.format == None:
            return

        words = []   # NOTE(review): never used below -- looks like dead code.

        # extract comment lines
        lines = []

        for line0 in self.lines:
            m = self.format.column.match( line0 )
            if m:
                lines.append( m.group( 1 ) )

        # now, look for a markup tag; only blocks that start with one are
        # treated as documentation content.
        for l in lines:
            l = string.strip( l )
            if len( l ) > 0:
                for tag in re_markup_tags:
                    if tag.match( l ):
                        self.content = lines
                        return

    def location( self ):
        """Return a `(file:line)' string for error messages."""
        return "(" + self.filename + ":" + repr( self.lineno ) + ")"

    # debugging only -- not used in normal operations
    def dump( self ):
        if self.content:
            print "{{{content start---"
            for l in self.content:
                print l
            print "---content end}}}"
            return

        fmt = ""
        if self.format:
            fmt = repr( self.format.id ) + " "

        # NOTE(review): `fmt' is computed but never printed below, so the
        # format id is lost -- confirm whether `print fmt + line' was meant.
        for line in self.lines:
            print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The `SourceProcessor' is in charge of reading a C source file and
## decomposing it into a series of different `SourceBlock' objects.
##
## A SourceBlock object consists of the following data.
##
## - A documentation comment block using one of the layouts above. Its
## exact format will be discussed later.
##
## - Normal sources lines, including comments.
##
##
class SourceProcessor:
    """Read a C source file and decompose it into `SourceBlock' objects.

    Each block is either a documentation comment block (matching one of
    the registered comment formats) or a run of ordinary source lines.
    """

    def __init__( self ):
        """Initialize a source processor."""
        self.blocks = []
        self.filename = None
        self.format = None
        self.lines = []

    def reset( self ):
        """Drop all collected blocks and forget the active format."""
        self.blocks = []
        self.format = None

    def parse_file( self, filename ):
        """Parse a C source file and add its blocks to the processor's
           list."""
        self.reset()
        self.filename = filename

        fileinput.close()
        self.format = None
        self.lineno = 0
        self.lines = []

        for src_line in fileinput.input( filename ):
            # strip trailing newlines, important on Windows machines!
            if src_line[-1] == '\012':
                src_line = src_line[0:-1]

            if self.format == None:
                self.process_normal_line( src_line )
                continue

            if self.format.end.match( src_line ):
                # a normal block end: keep the line, then close the block
                self.lines.append( src_line )
                self.add_block_lines()
            elif self.format.column.match( src_line ):
                # a normal column line: keep collecting
                self.lines.append( src_line )
            else:
                # an unexpected block end: close the current block, then
                # re-examine this line as a normal one
                self.add_block_lines()
                self.process_normal_line( src_line )

        # flush whatever is still accumulated
        self.add_block_lines()

    def process_normal_line( self, line ):
        """Process a normal line and check whether it starts a new block."""
        for candidate in re_source_block_formats:
            if candidate.start.match( line ):
                self.add_block_lines()
                self.format = candidate
                self.lineno = fileinput.filelineno()

        self.lines.append( line )

    def add_block_lines( self ):
        """Flush the accumulated lines into a new `SourceBlock', if any."""
        if not self.lines:
            return
        self.blocks.append( SourceBlock( self,
                                         self.filename,
                                         self.lineno,
                                         self.lines ) )
        self.format = None
        self.lines = []

    # debugging only, not used in normal operations
    def dump( self ):
        """Print all blocks in a processor."""
        for blk in self.blocks:
            blk.dump()
# eof
| {
"content_hash": "d7084f125a7984f6d487279390addd1d",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 76,
"avg_line_length": 33.42439024390244,
"alnum_prop": 0.48839754816112085,
"repo_name": "yapingxin/saturn-gui-lib-workshop",
"id": "edce1a5f327c19c057843265b7e23f7868853b9e",
"size": "13704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lib/FreeType/freetype-2.6.2/src/tools/docmaker/sources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "8286"
},
{
"name": "C",
"bytes": "18974626"
},
{
"name": "C++",
"bytes": "2666689"
},
{
"name": "CMake",
"bytes": "104576"
},
{
"name": "CSS",
"bytes": "3042"
},
{
"name": "DIGITAL Command Language",
"bytes": "74190"
},
{
"name": "Gnuplot",
"bytes": "630"
},
{
"name": "Groff",
"bytes": "4882"
},
{
"name": "HTML",
"bytes": "2698820"
},
{
"name": "M4",
"bytes": "114055"
},
{
"name": "Makefile",
"bytes": "582662"
},
{
"name": "Objective-C",
"bytes": "25406"
},
{
"name": "Perl",
"bytes": "64219"
},
{
"name": "Python",
"bytes": "382829"
},
{
"name": "Shell",
"bytes": "1041344"
}
],
"symlink_target": ""
} |
import json
import os
import shutil
import unittest
from tempfile import mkdtemp
from ..dicts import MissingValue
from ..smush import (config_sources, available_sources, smush_config,
LenientJSONEncoder)
class TestLenientJSONEncoder(unittest.TestCase):
    """Behavioral tests for LenientJSONEncoder.

    The encoder is private debugging machinery rather than public API,
    but its contract is still worth pinning down.
    """

    def test_encode(self):
        # ordinary objects encode exactly as with the stock encoder
        encoded = json.dumps({}, cls=LenientJSONEncoder)
        self.assertEqual('{}', encoded)

    def test_missing(self):
        # MissingValue placeholders serialize to a marker string
        placeholder = MissingValue('test')
        encoded = json.dumps(placeholder, cls=LenientJSONEncoder)
        self.assertEqual('"### MISSING VALUE ###"', encoded)

    def test_unencodable(self):
        # anything the encoder cannot handle still raises TypeError
        self.assertRaises(TypeError, json.dumps, object(),
                          cls=LenientJSONEncoder)
class TestConfigSources(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp(prefix='yoconfigurator-test')
self.appdir = os.path.join(self.tmpdir, 'app')
self.dc1dir = os.path.join(self.tmpdir, 'dc1')
self.dc2dir = os.path.join(self.tmpdir, 'dc2')
os.mkdir(self.appdir)
os.mkdir(self.dc1dir)
os.mkdir(self.dc2dir)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def touch(self, name):
f = open(os.path.join(self.tmpdir, name), 'w')
f.close()
def create_sources(self, sources):
for source in sources:
self.touch(os.path.join(source[0], source[1] + '.py'))
def clean_sources(self, sources):
"""Convert a config_sources result into a create_sources list.
i.e. the last two path components, minus a file extension
"""
return [tuple(path.rsplit('.', 1)[0].rsplit('/', 2)[1:])
for path in sources]
def test_available_sources(self):
sources = [
('dc1', 'common-foo'),
]
self.create_sources(sources)
# An extra source that won't be present
all_sources = [
([self.dc1dir], 'common-foo'),
([self.dc1dir], 'common-foo-bar'),
]
r = available_sources(all_sources)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_source_order(self):
sources = [
('dc1', 'common'),
('dc1', 'common-foo'),
('dc1', 'common-foo-bar'),
('dc1', 'common-overrides'),
('app', 'baz-default'),
('app', 'baz-foo'),
('app', 'baz-foo-bar'),
('dc1', 'baz'),
('dc1', 'baz-foo'),
('dc1', 'baz-foo-bar'),
('dc1', 'baz-overrides'),
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir], self.appdir)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_local_source_order(self):
sources = [
('dc1', 'common'),
('dc1', 'common-foo'),
('dc1', 'common-foo-bar'),
('dc1', 'common-local'),
('dc1', 'common-overrides'),
('app', 'baz-default'),
('app', 'baz-foo'),
('app', 'baz-foo-bar'),
('dc1', 'baz'),
('dc1', 'baz-foo'),
('dc1', 'baz-foo-bar'),
('app', 'baz-local'),
('dc1', 'baz-local'),
('dc1', 'baz-overrides'),
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir], self.appdir,
local=True)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_build_source_order(self):
sources = [
('dc1', 'common'),
('dc1', 'common-foo'),
('dc1', 'common-foo-bar'),
('dc1', 'common-local'),
('dc1', 'common-build'),
('dc1', 'common-overrides'),
('app', 'baz-default'),
('app', 'baz-foo'),
('app', 'baz-foo-bar'),
('dc1', 'baz'),
('dc1', 'baz-foo'),
('dc1', 'baz-foo-bar'),
('app', 'baz-local'),
('dc1', 'baz-local'),
('dc1', 'baz-build'),
('dc1', 'baz-overrides'),
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir], self.appdir,
local=True, build=True)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_build_implies_local(self):
sources = [
('dc1', 'common'),
('dc1', 'common-foo'),
('dc1', 'common-foo-bar'),
('dc1', 'common-local'),
('dc1', 'common-build'),
('dc1', 'common-overrides'),
('app', 'baz-default'),
('app', 'baz-foo'),
('app', 'baz-foo-bar'),
('dc1', 'baz'),
('dc1', 'baz-foo'),
('dc1', 'baz-foo-bar'),
('app', 'baz-local'),
('dc1', 'baz-local'),
('dc1', 'baz-build'),
('dc1', 'baz-overrides'),
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir], self.appdir,
local=False, build=True)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_multiple_config_dirs(self):
sources = [
('dc1', 'common-foo'),
('dc2', 'common-foo'),
('dc2', 'common-overrides')
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir, self.dc2dir],
self.appdir)
r = self.clean_sources(r)
self.assertEqual(r, sources)
def test_override_config_dirs(self):
sources = [
('dc1', 'common-foo'),
('dc2', 'common-foo'),
]
self.create_sources(sources)
r = config_sources('baz', 'foo', 'bar', [self.dc1dir, self.dc2dir],
self.appdir)
r = self.clean_sources(r)
self.assertEqual(r, sources)
class TestSmush(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp(prefix='yoconfigurator-test')
def tearDown(self):
# coverage gets confused if we delete files we've imported into our
# namespace https://github.com/nose-devs/nose/issues/111
if 'NO_CLEAN_TESTS' not in os.environ:
shutil.rmtree(self.tmpdir)
def test_nop(self):
c = smush_config([])
self.assertEqual(c, {})
def write(self, name, contents):
"""Write contents to tmpdir/name. Return full filename."""
fn = os.path.join(self.tmpdir, name)
with open(fn, 'w') as f:
f.write(contents)
return fn
def test_single(self):
fn = self.write('test.py', """
from yoconfigurator.dicts import merge_dicts
def update(config):
return merge_dicts(config, {'a': 1})
""")
c = smush_config([fn])
self.assertEqual(c, {'a': 1})
def test_initial(self):
fn = self.write('test.py', """
from yoconfigurator.dicts import merge_dicts
def update(config):
return merge_dicts(config, {'a': 2})
""")
c = smush_config([fn], initial={'a': 1})
self.assertEqual(c, {'a': 2})
def test_multiple(self):
a = self.write('a.py', """
from yoconfigurator.dicts import merge_dicts
def update(config):
return merge_dicts(config, {'a': 1})
""")
b = self.write('b.py', """
from yoconfigurator.dicts import merge_dicts
def update(config):
return merge_dicts(config, {'b': 2})
""")
c = smush_config([a, b])
self.assertEqual(c, {'a': 1, 'b': 2})
def test_missing_value(self):
a = self.write('a.py', """
from yoconfigurator.dicts import MissingValue, merge_dicts
def update(config):
return merge_dicts(config, {'a': MissingValue('a')})
""")
b = self.write('b.py', """
from yoconfigurator.dicts import merge_dicts
def update(config):
return merge_dicts(config, {'a': 1})
""")
c = smush_config([a, b])
self.assertEqual(c, {'a': 1})
| {
"content_hash": "1d3c313c42ce471019e96ad21d9dbe0d",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 77,
"avg_line_length": 30.681647940074907,
"alnum_prop": 0.517333984375,
"repo_name": "yola/yoconfigurator",
"id": "2bb9028431a206a29ffdab60fd59e7944b94648b",
"size": "8192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yoconfigurator/tests/test_smush.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35669"
}
],
"symlink_target": ""
} |
from __future__ import with_statement, print_function, absolute_import
from setuptools import setup, find_packages, Extension
from distutils.version import LooseVersion
import numpy as np
import os
from os.path import join
# TODO
pcl_ver = "1.8"
vtk_ver = "7.1"
min_cython_ver = '0.19.0'
try:
import Cython
ver = Cython.__version__
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
from Cython.Distutils import build_ext
from Cython.Build import cythonize
cython = True
except ImportError:
cython = False
if cython:
ext = '.pyx'
cmdclass = {'build_ext': build_ext}
else:
ext = '.cpp'
cmdclass = {}
if not os.path.exists(join("pypcl", "common" + ext)):
raise RuntimeError("Cython is required to generate C++ codes.")
ext_modules = cythonize(
[Extension(
name="pypcl.common",
sources=[
join("pypcl", "common" + ext),
],
include_dirs=[np.get_include(),
join("/usr/local/include/pcl-" + pcl_ver),
],
library_dirs=["/usr/local/lib"],
libraries=["pcl_common"],
extra_compile_args=["-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.8"],
extra_link_args=[],
language="c++"),
Extension(
name="pypcl.visualization",
sources=[
join("pypcl", "visualization" + ext),
],
include_dirs=[np.get_include(),
join("/usr/local/include/pcl-" + pcl_ver),
"/usr/local/include/vtk-" + vtk_ver,
],
library_dirs=["/usr/local/lib"],
libraries=["pcl_visualization"],
extra_compile_args=["-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.8"],
extra_link_args=[],
language="c++")
],
)
setup(
name='pypcl',
version='0.0.1-dev',
description='A python wrapper for the Point Cloud Library',
author='Ryuichi Yamamoto',
author_email='zryuichi@gmail.com',
url='https://github.com/r9y9/pypcl',
license='MIT',
packages=find_packages(),
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=[
'numpy >= 1.8.0',
'six'
],
tests_require=['nose', 'coverage'],
extras_require={
'docs': ['numpydoc', 'sphinx_rtd_theme'],
'test': ['nose'],
'develop': ['cython >= ' + min_cython_ver],
},
classifiers=[
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
keywords=["pypcl", "PCL", "Point Cloud Library"]
)
| {
"content_hash": "0dbfc6399198ceb32d3cb49d7cb8269e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 90,
"avg_line_length": 29.954954954954953,
"alnum_prop": 0.5696240601503759,
"repo_name": "r9y9/pypcl",
"id": "92fff32325d457c62a856732797de944312015b2",
"size": "3342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6080"
}
],
"symlink_target": ""
} |
import os
import rpm
class ConfigurationManager(object):
    """
    ConfigurationManager manages a set of configurations: several read-only
    configurations, one reference configuration used for dependency
    resolution, and optionally one writable configuration that receives
    RPM package installations.
    """

    # class-level shared state -- every accessor is a staticmethod, so the
    # manager effectively behaves as a process-wide singleton
    _hdrs = []
    _confs = []
    _refConf = None
    _writeConf = None

    @staticmethod
    def loadConfigurations(readConfPaths, refConfPath, writeConfPath = None):
        """
        DESC   : load configurations in the manager
        PARAMS : readConfPaths - paths to the reading configurations
                 refConfPath - path to the reference configuration, the dependencies are resolved on this conf
                 writeConfPath - configuration where the RPM packages are installed
        """
        # work on a copy (and normalize None) -- the previous version
        # inserted into the caller's list in place, mutating its argument
        paths = [] if readConfPaths is None else list(readConfPaths)
        ConfigurationManager._refConf = Configuration(refConfPath, False)
        if writeConfPath is not None:
            # the writable configuration is parsed first so it ends up
            # at index 0 of the configuration list
            paths.insert(0, writeConfPath)
        ConfigurationManager._confs, ConfigurationManager._hdrs = \
            ConfigurationManager._parseHdrInConfs(paths)
        if writeConfPath is not None:
            ConfigurationManager._writeConf = ConfigurationManager._confs[0]

    @staticmethod
    def _parseHdrInConfs(confPaths):
        """
        DESC    : create conf objects and extract headers from installed packages in each conf
        PARAMS  : confPaths - list of configuration paths
        RETURNS : two lists: the Configuration list and the combined header list
        """
        hdrs = []
        confs = []
        for confPath in confPaths:
            c = Configuration(confPath)
            confs.append(c)
            hdrs.extend(c.hdrs)
        return confs, hdrs

    @staticmethod
    def install(pkgPaths):
        """
        DESC    : install packages in the writable configuration
        PARAMS  : pkgPaths - list of package paths
        RETURNS :
        """
        ConfigurationManager._writeConf.install(pkgPaths)

    @staticmethod
    def verify():
        """
        DESC    : compute dependencies resolved between configurations
        PARAMS  :
        RETURNS : the unresolved dependencies
        """
        return ConfigurationManager._verify(ConfigurationManager._hdrs)

    @staticmethod
    def verifyForInstall(hdrs):
        """
        DESC    : compute dependencies resolved between configurations and extra packages
        PARAMS  : hdrs - list of headers
        RETURNS : the unresolved dependencies
        """
        # check against a copy extended with the candidate headers
        t_hdrs = ConfigurationManager._hdrs[:]
        t_hdrs.extend(hdrs)
        return ConfigurationManager._verify(t_hdrs)

    @staticmethod
    def _verify(hdrs):
        """
        DESC    : compute dependencies on the reference configuration
        PARAMS  : hdrs - list of headers
        RETURNS : the unresolved dependencies
        """
        return ConfigurationManager._refConf.checkHeaders(hdrs)

    @staticmethod
    def createConf(path):
        """
        DESC    : create a configuration (an empty private RPM database)
        PARAMS  : path - directory where the configuration will be created
        RETURNS :
        """
        # accept an existing directory, or a missing leaf whose parent exists
        if not os.path.isdir(path) and not os.path.isdir(path.rsplit('/', 1)[0]):
            raise Exception(path + ' not a directory')
        RPMdb = path + Configuration.DEFAULT_RPM_DB_PATH
        os.makedirs(RPMdb)
        rpm.addMacro("_prefix", path)
        rpm.addMacro("_dbpath", RPMdb)
        ts = rpm.TransactionSet()
        ts.initDB()
class Configuration(object):
    """
    Represents a configuration. A configuration is a set of services with
    its own RPM database. Its attributes are:
      - path to the configuration
      - relative path to the RPM database inside the configuration
      - RPM lib TransactionSet object bound to that database
    """

    DEFAULT_RPM_DB_PATH = '/var/lib/rpm'

    def __init__(self, confPath, readHeaders = True, RPMDbPath = DEFAULT_RPM_DB_PATH):
        """
        DESC   : Constructor for the Configuration class.
        PARAMS : confPath - configuration path
                 readHeaders - when True, read installed package headers immediately
                 RPMDbPath - -TESTING ONLY- *relative* RPM database path in the configuration
        """
        self._confPath = confPath
        self._RPMDbPath = RPMDbPath
        # point librpm at this configuration's private database while the
        # TransactionSet is created, then restore the macro
        rpm.addMacro("_dbpath", self._confPath + self._RPMDbPath)
        self._ts = rpm.TransactionSet()
        self._ts.openDB()
        rpm.delMacro("_dbpath")
        self._hdrs = []
        if readHeaders:
            self._parseHdrs()

    @property
    def RPMDbPath(self):
        return self._RPMDbPath

    @property
    def confPath(self):
        return self._confPath

    @property
    def hdrs(self):
        return self._hdrs

    def _parseHdrs(self):
        """
        DESC    : extract installed package headers from the RPM database
        RETURNS : nothing; fills self._hdrs
        """
        self._hdrs = list(self._ts.dbMatch())

    def install(self, pkgPaths):
        """
        DESC    : install package(s) in the configuration
        PARAMS  : pkgPaths - list of package file paths to install
        RETURNS :
        """
        # local import keeps the module's import surface unchanged
        import subprocess
        for pkg in pkgPaths:
            fd = os.open(pkg, os.O_RDONLY)
            try:
                hdr = self._ts.hdrFromFdno(fd)
            finally:
                # close even if header parsing fails (the old code
                # leaked the descriptor on error)
                os.close(fd)
            prefixes = hdr[rpm.RPMTAG_PREFIXES]
            prefix = prefixes[0] if prefixes else ""
            # pass an argument list instead of a shell string: package
            # paths containing spaces or shell metacharacters can no
            # longer break (or inject into) the command
            subprocess.call(["rpm", "-Uvh", "--nodeps",
                             "--prefix", self._confPath + prefix,
                             "--dbpath", self._confPath + self._RPMDbPath,
                             pkg])

    def checkHeaders(self, hdrs):
        """
        DESC    : compute dependencies on this conf
        PARAMS  : hdrs - headers to verify
        RETURNS : unresolved dependencies
        """
        for h in hdrs:
            self._ts.addInstall(h, "")
        return self._ts.check()

    def __str__(self):
        return "Configuration path : %s - Configuration RPM DB : %s" % (self.confPath, self.RPMDbPath)
| {
"content_hash": "0353c0f03176504802b7d94862789b0c",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 144,
"avg_line_length": 32.7319587628866,
"alnum_prop": 0.5976377952755906,
"repo_name": "adriengentil/ConfMgr",
"id": "8473abcc21f7e7a58d59b5188144da7b4235da45",
"size": "6498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AbstractRPM/Configurations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "18658"
}
],
"symlink_target": ""
} |
import time
def main():
    """Prompt for the seven characters of a licence plate, pausing after
    each entry, then report the (hard-coded) lookup result.

    Behavior matches the original unrolled version: the first prompt has
    no leading newline, the pause is 60s after the first six characters
    and 30s after the seventh.
    """
    ordinals = ('First', 'Second', 'Third', 'Fourth', 'Fifth', 'Sixth',
                'Seventh')
    chars = []
    last = len(ordinals) - 1
    for index, ordinal in enumerate(ordinals):
        prefix = '' if index == 0 else '\n'
        chars.append(input(prefix + ordinal + ' Character of Plate: '))
        time.sleep(60 if index < last else 30)
    print("It is at the limo company")


if __name__ == '__main__':
    main()
| {
"content_hash": "cecc116c68881b2935f76f7420274b67",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.6161971830985915,
"repo_name": "hal00alex/SlothLookUp",
"id": "26c7105360bdbe45cb8258f16c3d3bc7a98e1e85",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Zoo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "568"
}
],
"symlink_target": ""
} |
import itertools
from oslo_log import log as logging
from webtest import TestApp
from designate.api import v2 as api_v2
from designate.api import middleware
from designate.tests.test_api import ApiTestCase
LOG = logging.getLogger(__name__)
# Malformed UUIDs the API must reject: a non-hex digit ('g'), a 32-char
# dashless string with invalid characters ('GG'), and a far-too-short value.
INVALID_ID = [
    '2fdadfb1-cf96-4259-ac6b-bb7b6d2ff98g',
    '2fdadfb1cf964259ac6bbb7b6d2ff9GG',
    '12345'
]
class ApiV2TestCase(ApiTestCase):
    """Base test case for the v2 API.

    Builds the WSGI middleware stack around the v2 application and
    provides assertion helpers for invalid UUIDs, API exceptions, and
    paging behavior.
    """

    def setUp(self):
        super(ApiV2TestCase, self).setUp()

        # Ensure the v2 API is enabled
        self.config(enable_api_v2=True, group='service:api')

        # Create the application
        self.app = api_v2.factory({})

        # Inject the NormalizeURIMiddleware middleware
        self.app = middleware.NormalizeURIMiddleware(self.app)

        # Inject the ValidationError middleware
        self.app = middleware.APIv2ValidationErrorMiddleware(self.app)

        # Inject the FaultWrapper middleware
        self.app = middleware.FaultWrapperMiddleware(self.app)

        # Inject the TestContext middleware
        self.app = middleware.TestContextMiddleware(
            self.app, self.admin_context.tenant,
            self.admin_context.tenant)

        # Obtain a test client
        self.client = TestApp(self.app)

    def tearDown(self):
        self.app = None
        self.client = None
        super(ApiV2TestCase, self).tearDown()

    def _assert_invalid_uuid(self, method, url_format, *args, **kw):
        """
        Test that every combination of invalid UUIDs in the URL is
        rejected with an `invalid_uuid` 400 error.
        """
        count = url_format.count('%s')
        for i in itertools.product(INVALID_ID, repeat=count):
            self._assert_exception('invalid_uuid', 400, method, url_format % i)

    def _assert_exception(self, expected_type, expected_status, obj,
                          *args, **kwargs):
        """
        Checks that the response of an api call raising an exception
        contains the wanted data.
        """
        kwargs.setdefault('status', expected_status)

        # `obj` may already be a response (it has `json`); otherwise it is
        # a client method to invoke
        response = obj(*args, **kwargs) if not hasattr(obj, 'json') else obj

        self.assertEqual(expected_status, response.json['code'])
        self.assertEqual(expected_type, response.json['type'])

    def _assert_invalid_paging(self, data, url, key):
        """
        Test that invalid paging parameters on a given url each produce
        the matching 400 error.
        """
        self._assert_paging(data, url, key=key,
                            limit='invalid_limit',
                            expected_type='invalid_limit',
                            expected_status=400)

        self._assert_paging(data, url, key=key,
                            sort_dir='invalid_sort_dir',
                            expected_type='invalid_sort_dir',
                            expected_status=400)

        self._assert_paging(data, url, key=key,
                            sort_key='invalid_sort_key',
                            expected_type='invalid_sort_key',
                            expected_status=400)

        self._assert_paging(data, url, key=key,
                            marker='invalid_marker',
                            expected_type='invalid_marker',
                            expected_status=400)

    def _assert_paging(self, data, url, key=None, limit=5, sort_dir='asc',
                       sort_key='created_at', marker=None,
                       expected_type=None, expected_status=200):
        """
        Walk `url` page by page and check that the pages enumerate `data`
        in order; on non-200 statuses, check the expected error instead.
        """
        def _page(marker=None):
            params = {'limit': limit,
                      'sort_dir': sort_dir,
                      'sort_key': sort_key}

            if marker is not None:
                params['marker'] = marker

            r = self.client.get(url, params, status=expected_status)

            if expected_status != 200:
                if expected_type:
                    self._assert_exception(expected_type, expected_status, r)
                return r
            else:
                return r.json[key] if key in r.json else r.json

        response = _page(marker=marker)
        if expected_status != 200:
            if expected_type:
                self._assert_exception(expected_type, expected_status,
                                       response)
            return response

        x = 0
        length = len(data)

        # bug fix: `xrange` is Python-2-only; `range` behaves identically
        # here and also runs on Python 3
        for i in range(0, length):
            assert data[i]['id'] == response[x]['id']

            x += 1

            # Don't bother getting a new page if we're at the last item
            if x == len(response) and i != length - 1:
                x = 0
                response = _page(response[-1:][0]['id'])

        _page(marker=response[-1:][0]['id'])
| {
"content_hash": "2443a2bb679e60ccd319adf02429521b",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 33.42028985507246,
"alnum_prop": 0.550520381613183,
"repo_name": "cneill/designate",
"id": "cdfa294bcec7367ffbd09687a2ca0fb3020576ad",
"size": "5267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/tests/test_api/test_v2/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7596"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1866778"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13350"
}
],
"symlink_target": ""
} |
import logging
from marshmallow import fields
from azure.ai.ml._schema.core.fields import ArmStr
from azure.ai.ml._schema.core.schema import PatchedSchemaMeta
from azure.ai.ml.constants._common import AzureMLResourceType
module_logger = logging.getLogger(__name__)
class JobOutputSchema(metaclass=PatchedSchemaMeta):
    """Marshmallow schema describing a job output location: the ARM id of a
    datastore plus a path within it."""

    # ARM resource id of the datastore that holds the output
    datastore_id = ArmStr(azureml_type=AzureMLResourceType.DATASTORE)
    # path to the output inside the datastore
    path = fields.Str()
| {
"content_hash": "fab332b68786af8d02be80a199fc076e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 69,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.8004807692307693,
"repo_name": "Azure/azure-sdk-for-python",
"id": "806791197e961b0d14196f743bd16e4cc5a7d3a4",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_schema/job/job_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
# 2016: Cornelius Diekmann hacking untested stuff
from select import select
import os
import tty
import sys
import signal
# public API mirrors the stdlib pty module
__all__ = ["openpty","fork","spawn"]

#yes, this file is a copy of the python builtin pty lib
# banner on stderr so users notice they run the patched copy, not stdlib pty
sys.stderr.write("Proudly presented by corny's hacked pty lib\n")
sys.stderr.flush()

# standard POSIX file descriptor numbers
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2

# os.fork()/os.forkpty() return 0 in the child process
CHILD = 0
def openpty():
    """openpty() -> (master_fd, slave_fd)

    Open a pty master/slave pair.  Prefer the OS-provided os.openpty();
    fall back to probing BSD-style /dev/pty* devices by hand."""
    try:
        pair = os.openpty()
    except (AttributeError, OSError):
        pass
    else:
        return pair
    master_fd, slave_name = _open_terminal()
    return master_fd, slave_open(slave_name)
def master_open():
    """master_open() -> (master_fd, slave_name)

    Open a pty master and return its fd plus the slave device filename.
    Deprecated, use openpty() instead."""
    try:
        master_fd, slave_fd = os.openpty()
    except (AttributeError, OSError):
        # no os.openpty(): fall back to the manual BSD-style probe
        return _open_terminal()
    slave_name = os.ttyname(slave_fd)
    os.close(slave_fd)
    return master_fd, slave_name
def _open_terminal():
"""Open pty master and return (master_fd, tty_name)."""
for x in 'pqrstuvwxyzPQRST':
for y in '0123456789abcdef':
pty_name = '/dev/pty' + x + y
try:
fd = os.open(pty_name, os.O_RDWR)
except OSError:
continue
return (fd, '/dev/tty' + x + y)
raise OSError('out of pty devices')
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd

    Open the pty slave and acquire the controlling terminal, returning
    the opened file descriptor.
    Deprecated, use openpty() instead."""
    fd = os.open(tty_name, os.O_RDWR)
    try:
        from fcntl import ioctl, I_PUSH
    except ImportError:
        # platform without STREAMS support: the plain fd is enough
        return fd
    try:
        ioctl(fd, I_PUSH, "ptem")
        ioctl(fd, I_PUSH, "ldterm")
    except OSError:
        pass
    return fd
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""
    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        # os.forkpty() unavailable or failed: fall back to the manual
        # openpty()/fork() sequence below
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd

    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        # the child only needs the slave side
        os.close(master_fd)

        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if (slave_fd > STDERR_FILENO):
            os.close (slave_fd)

        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        # the parent keeps only the master side
        os.close(slave_fd)

    # Parent and child process.
    return pid, master_fd
def _writen(fd, data):
"""Write all the data to a descriptor."""
while data:
n = os.write(fd, data)
data = data[n:]
def _read(fd):
"""Default read function."""
return os.read(fd, 1024)
def _copy(master_fd, master_read=_read, stdin_read=_read):
    """Parent copy loop.

    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)"""
    fds = [master_fd, STDIN_FILENO]
    while True:
        # The expected path to leave this infinite loop is that the
        # child exits and its slave_fd is destroyed. In this case,
        # master_fd will become ready in select() and reading from
        # master_fd either raises an OSError (Input/output error) on
        # Linux or returns EOF on BSD.
        rfds, wfds, xfds = select(fds, [], [])
        if master_fd in rfds:
            data = master_read(master_fd)
            if not data:  # Reached EOF.
                return
            else:
                # forward child output to our stdout
                os.write(STDOUT_FILENO, data)
        if STDIN_FILENO in rfds:
            data = stdin_read(STDIN_FILENO)
            if not data:
                # our stdin is gone: stop select()ing on it
                fds.remove(STDIN_FILENO)
                # hack by corny: send ctrl+d to slave if stdin is gone
                # when having attached this to a `nc -e` and the client disconnects, we want to make sure that we will not leave a process
                # hanging around forever but we want to terminate at some point. This is basically a broken pipe.
                print("sending EOF to slave", file=sys.stderr, flush=True)
                os.write(master_fd, b'\x04')
                return
            else:
                _writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
    """Create a spawned process.

    Runs *argv* on a fresh pty, relays data between our stdin/stdout and
    the child via _copy(), and returns the child's exit status."""
    if type(argv) == type(''):
        argv = (argv,)
    pid, master_fd = fork()
    if pid == CHILD:
        try:
            os.execlp(argv[0], *argv)
        except:
            # If we wanted to be really clever, we would use
            # the same method as subprocess() to pass the error
            # back to the parent. For now just dump stack trace.
            sys.excepthook(*sys.exc_info())
        finally:
            # never fall back into the parent's code path
            os._exit(1)
        assert False, "unreachable or exec failed"
    try:
        # switch our terminal to raw mode so keystrokes reach the child
        # unmodified; remember the old settings for restoration
        mode = tty.tcgetattr(STDIN_FILENO)
        tty.setraw(STDIN_FILENO)
        restore = 1
    except tty.error:    # This is the same as termios.error
        restore = 0
    try:
        _copy(master_fd, master_read, stdin_read)
    except OSError:
        # Some OSes never return an EOF on pty, just raise
        # an error instead.
        pass
    finally:
        if restore:
            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
        os.close(master_fd)
    # non-blocking wait: _copy() may have returned (stdin-EOF hack) while
    # the child is still alive
    wpid, exitstatus = os.waitpid(pid, os.WNOHANG)
    if wpid == 0 and exitstatus == 0:
        # child has not exited yet: nudge it with SIGHUP, then wait for real
        print("Child did not exit, sending SIGHUP", file=sys.stderr, flush=True)
        os.kill(pid, signal.SIGHUP)
        exitstatus = os.waitpid(pid, 0)[1]
    return exitstatus
| {
"content_hash": "080012fc53d51f5d5cdc906c1f9c5653",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 138,
"avg_line_length": 31.23671497584541,
"alnum_prop": 0.5842870399010207,
"repo_name": "diekmann/tinyrsh",
"id": "0d601b70e2ba7f65535a92d06b9920de55198e78",
"size": "6466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-ptysh/pty.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "14199"
},
{
"name": "Haskell",
"bytes": "10501"
},
{
"name": "Makefile",
"bytes": "817"
},
{
"name": "Python",
"bytes": "6771"
},
{
"name": "Rust",
"bytes": "66543"
}
],
"symlink_target": ""
} |
from datetime import timedelta
from unittest import TestCase
import pandas as pd
import pandas.testing
from fireant.dataset.modifiers import Rollup
from fireant.dataset.totals import scrub_totals_from_share_results
from fireant.tests.dataset.mocks import (
dimx0_metricx2_df,
dimx1_str_df,
dimx1_str_totals_df,
dimx2_date_str_df,
dimx2_date_str_totals_df,
dimx2_date_str_totalsx2_df,
mock_dataset,
)
# Sentinel just below pd.Timestamp.max; presumably totals rows are indexed at
# the max timestamp so `.loc[:TIMESTAMP_UPPERBOUND]` excludes them — confirm
# against the mock dataframes.
TIMESTAMP_UPPERBOUND = pd.Timestamp.max - timedelta(seconds=1)
class ScrubTotalsTests(TestCase):
    """Tests for scrub_totals_from_share_results: totals rows must be
    removed for dimensions that are not rolled up and kept for dimensions
    wrapped in Rollup."""

    # NOTE(review): no 'test_' prefix, so this never runs; the 'ignore_'
    # name looks deliberate — rename to 'test_...' to enable.
    def ignore_dimensionless_result_sets(self):
        result = scrub_totals_from_share_results(dimx0_metricx2_df, [])
        expected = dimx0_metricx2_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_remove_totals_for_non_rollup_dimensions(self):
        result = scrub_totals_from_share_results(dimx1_str_totals_df, [mock_dataset.fields.political_party])
        expected = dimx1_str_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_remove_totals_for_non_rollup_dimensions_with_multiindex(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totals_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
        )
        expected = dimx2_date_str_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_remove_totals_for_non_rollup_dimensions_with_multiindex_and_multiple_totals(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totalsx2_df, [mock_dataset.fields.timestamp, mock_dataset.fields.political_party]
        )
        expected = dimx2_date_str_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_do_not_remove_totals_for_rollup_dimensions(self):
        result = scrub_totals_from_share_results(dimx1_str_totals_df, [Rollup(mock_dataset.fields.political_party)])
        expected = dimx1_str_totals_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_do_not_remove_totals_for_rollup_dimensions_with_multiindex(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totals_df, [mock_dataset.fields.timestamp, Rollup(mock_dataset.fields.political_party)]
        )
        expected = dimx2_date_str_totals_df
        pandas.testing.assert_frame_equal(result, expected)

    def test_do_not_remove_totals_for_rollup_dimensions_with_multiindex_and_lower_dimension_totals(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totalsx2_df, [mock_dataset.fields.timestamp, Rollup(mock_dataset.fields.political_party)]
        )
        expected = dimx2_date_str_totalsx2_df.loc[:TIMESTAMP_UPPERBOUND]
        pandas.testing.assert_frame_equal(result, expected)

    def test_do_not_remove_totals_for_rollup_dimensions_with_multiindex_and_higher_dimension_totals(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totalsx2_df, [Rollup(mock_dataset.fields.timestamp), mock_dataset.fields.political_party]
        )
        # Fix: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; build the expected frame with pd.concat.  iloc[[-1]] keeps the
        # last row as a one-row DataFrame so index and dtypes are preserved.
        expected = pd.concat([
            dimx2_date_str_totalsx2_df.loc[(slice(None), slice('Democrat', 'Republican')), :],
            dimx2_date_str_totalsx2_df.iloc[[-1]],
        ])
        pandas.testing.assert_frame_equal(result, expected)

    def test_do_not_remove_totals_for_rollup_dimensions_with_multiindex_and_all_rolled_up(self):
        result = scrub_totals_from_share_results(
            dimx2_date_str_totalsx2_df,
            [Rollup(mock_dataset.fields.timestamp), Rollup(mock_dataset.fields.political_party)],
        )
        expected = dimx2_date_str_totalsx2_df
        pandas.testing.assert_frame_equal(result, expected)
| {
"content_hash": "3f8e03e4293fde18433e0a5cb4a3313c",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 116,
"avg_line_length": 37.38383838383838,
"alnum_prop": 0.6995406646852202,
"repo_name": "kayak/fireant",
"id": "27b3cafa47a0af8c8c15c0cdbc6ee850eb16a562",
"size": "3701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fireant/tests/dataset/test_filter_totals_from_share_results.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "762032"
},
{
"name": "TSQL",
"bytes": "1783"
}
],
"symlink_target": ""
} |
import factory
from factory.django import DjangoModelFactory
from leaflets.models import Leaflet
class LeafletFactory(DjangoModelFactory):
    """factory_boy factory producing Leaflet instances for tests."""

    class Meta:
        model = Leaflet

    # Unique sequential titles: "Leaflet 0", "Leaflet 1", ...
    title = factory.Sequence(lambda n: "Leaflet %d" % n)
| {
"content_hash": "ae92dd5dfab4f189e19a3a0c6b2c6ffa",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 56,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7458333333333333,
"repo_name": "DemocracyClub/electionleaflets",
"id": "5725aad1601797f9b34120d7897cc30e42c5f94c",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/leaflets/tests/model_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "7910"
},
{
"name": "HTML",
"bytes": "92760"
},
{
"name": "JavaScript",
"bytes": "5712"
},
{
"name": "Makefile",
"bytes": "2940"
},
{
"name": "Python",
"bytes": "194406"
},
{
"name": "SCSS",
"bytes": "12241"
}
],
"symlink_target": ""
} |
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium import webdriver
from billmonster import _element_available
from clint import args
from clint.textui import colored, puts
import keyring, sys
def capitalone(user=None, quit_when_finished=True, browser=None):
    """Log in to capitalone.com with Selenium and print the card balance.

    user: login name; defaults to the first command line argument.
    quit_when_finished: quit the browser before returning.
    browser: reuse an existing WebDriver; otherwise Firefox is started.
    Returns the WebDriver instance.  The password comes from the local
    keyring backend; exits the process on any failure.
    NOTE: Python 2 module (print statement below).
    """
    if not user:
        # Get the username from the command line arguments.
        user = args.get(0)
    # Must supply username.
    if user is None:
        puts(colored.red('You must supply a username like "python capitalone.py nick"'))
        sys.exit()
    # Get the user's password from the password backend.
    key = keyring.get_password('capitalone.com', user)
    # If the key doesn't exist in the password backend.
    if key is None:
        puts(colored.red("You must store the password for {} in your keyring's backend.".format(user)))
        puts('See: http://pypi.python.org/pypi/keyring/#configure-your-keyring-lib')
        sys.exit()
    # Log what we're currently working on.
    puts(colored.blue('\nCapital One ({})'.format(user)))
    if not browser:
        # Init the WebDriver.
        b = webdriver.Firefox()
    else:
        b = browser
    b.get('https://www.capitalone.com/')
    # Only credit card accounts are supported at this time.
    account_type = b.find_element_by_css_selector('option[value="credit cards"]')
    account_type.click()
    # Find the username field on the page.
    username = b.find_element_by_css_selector('input#eos-userid')
    username.send_keys(user)
    # Find the password field on the page.
    password = b.find_element_by_css_selector('input#eos-password')
    password.send_keys(key)
    password.submit()
    # Wait for an account list.
    try:
        WebDriverWait(b, timeout=10).until(_element_available(b, 'table.dataTable'))
    except TimeoutException:
        puts(colored.red("Couldn't find any accounts for that username."))
        b.quit()
        sys.exit()
    # Balance cell of the first account row.
    amount = b.find_element_by_css_selector('table.dataTable tr.itemSummary td:nth-child(4) p')
    print 'Capital One ({}): {}'.format(user, amount.text)
    if quit_when_finished:
        b.quit()
    return b
# Script entry point: scrape using the username from the command line.
if __name__ == '__main__':
    capitalone()
| {
"content_hash": "ac22b67bfd1480eaa8fe64f1ef42e801",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 103,
"avg_line_length": 29.67105263157895,
"alnum_prop": 0.6660753880266075,
"repo_name": "nicksergeant/billmonster",
"id": "ee113ef5f90f20fe8caf242017406ed14ce4f046",
"size": "2278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capitalone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16038"
}
],
"symlink_target": ""
} |
import datetime
from django.test import TestCase
from django.utils import timezone
from schedule.models import Event, Rule, Calendar
from schedule.utils import EventListManager
class TestEventListManager(TestCase):
    """Tests for EventListManager.occurrences_after over recurring events."""

    def setUp(self):
        weekly_rule = Rule.objects.create(frequency="WEEKLY")
        daily_rule = Rule.objects.create(frequency="DAILY")
        calendar = Calendar.objects.create(name="MyCal")
        self.default_tzinfo = timezone.get_default_timezone()
        tz = self.default_tzinfo
        # Weekly event: Wednesdays 08:00-09:00, recurring until 2009-10-05.
        self.event1 = Event(
            title='Weekly Event',
            start=datetime.datetime(2009, 4, 1, 8, 0, tzinfo=tz),
            end=datetime.datetime(2009, 4, 1, 9, 0, tzinfo=tz),
            end_recurring_period=datetime.datetime(2009, 10, 5, 0, 0, tzinfo=tz),
            rule=weekly_rule,
            calendar=calendar,
        )
        self.event1.save()
        # Daily event: 09:00-10:00, recurring until 2009-05-05.
        self.event2 = Event(
            title='Recent Event',
            start=datetime.datetime(2008, 1, 5, 9, 0, tzinfo=tz),
            end=datetime.datetime(2008, 1, 5, 10, 0, tzinfo=tz),
            end_recurring_period=datetime.datetime(2009, 5, 5, 0, 0, tzinfo=tz),
            rule=daily_rule,
            calendar=calendar,
        )
        self.event2.save()

    def test_occurrences_after(self):
        manager = EventListManager([self.event1, self.event2])
        after = datetime.datetime(2009, 4, 1, 0, 0, tzinfo=self.default_tzinfo)
        occurrences = manager.occurrences_after(after)
        # Interleaving: the weekly event, then a week of daily events, then
        # the weekly event again.
        expected_order = [self.event1] + [self.event2] * 7 + [self.event1]
        for expected_event in expected_order:
            self.assertEqual(next(occurrences).event, expected_event)
        # Without an 'after' argument, no occurrences are yielded.
        occurrences = manager.occurrences_after()
        self.assertEqual(list(occurrences), [])
| {
"content_hash": "3ddae84acd082d1685bf1e849c1756df",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 108,
"avg_line_length": 44.97959183673469,
"alnum_prop": 0.6447368421052632,
"repo_name": "nharsch/django-scheduler",
"id": "a2e5675bf72118f12c36daf1781721d27f0a4f8b",
"size": "2204",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3394"
},
{
"name": "HTML",
"bytes": "36917"
},
{
"name": "Python",
"bytes": "208514"
}
],
"symlink_target": ""
} |
from sqlalchemy import *
from migrate import *
from nova import log as logging
meta = MetaData()
LOG = logging.getLogger(__name__)

# Minimal declaration of the existing networks table; only the primary key
# is needed for sqlalchemy-migrate to add a column.
networks = Table('networks', meta,
        Column("id", Integer(), primary_key=True, nullable=False))

# Add priority column to networks table
priority = Column('priority', Integer())
def upgrade(migrate_engine):
    """Add the nullable integer 'priority' column to the networks table."""
    meta.bind = migrate_engine
    try:
        networks.create_column(priority)
    except Exception:
        # NOTE(review): '_' (gettext) is not imported in this module; nova
        # installs it into builtins at startup — confirm before running
        # this migration standalone.
        LOG.error(_("priority column not added to networks table"))
        raise
def downgrade(migrate_engine):
    """Revert the migration by dropping the 'priority' column."""
    meta.bind = migrate_engine
    networks.drop_column(priority)
| {
"content_hash": "336491cc62098cd2fe7b555a034d9abf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 21.066666666666666,
"alnum_prop": 0.6930379746835443,
"repo_name": "russellb/nova",
"id": "6d709e958f344ed41db4f66500baf06753123744",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4974"
},
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5611148"
},
{
"name": "Shell",
"bytes": "25380"
}
],
"symlink_target": ""
} |
"""
Setup file for pyddq.
This file was generated with PyScaffold 2.5.6, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import errno
import glob
import os
import subprocess
import sys

from setuptools import setup, Command
class IntegrationTestCommand(Command):
    """setuptools command that runs the integration tests via spark-submit.

    Every test_*.py file under the 'addopts' directory (configured in
    setup.cfg) is submitted as its own Spark job with the Drunken Data
    Quality jar on the driver classpath.  Exits with a non-zero status if
    spark-submit is missing or a test run fails.
    """

    description = "A command to run integration tests"
    user_options = [("jar=", None, "Path to Drunken Data Quality jar")]
    jar = None
    addopts = None

    def initialize_options(self):
        pass

    def finalize_options(self):
        # Both the test directory (setup.cfg) and --jar are mandatory.
        if self.addopts is None:
            exit("error: option addopts should be specified in setup.cfg")
        elif self.jar is None:
            exit("error: path to Drunken Data Quality jar should be specified")

    def run(self):
        log4j_path = os.path.abspath("../src/test/resources/log4j.properties")
        result = 0
        try:
            for filename in glob.glob(os.path.join(self.addopts, "test_*.py")):
                # NOTE(review): 'result or ...' short-circuits, so after the
                # first failing file the remaining files are skipped —
                # confirm this fail-fast behaviour is intended.
                result = result or subprocess.call([
                    "spark-submit",
                    "--driver-java-options",
                    '"-Dlog4j.configuration=file://{path}"'.format(path=log4j_path),
                    "--driver-class-path",
                    self.jar,
                    filename
                ])
        except OSError as e:
            # Fix: os.errno was an accidental re-export removed in
            # Python 3.7 — use the errno module directly.
            if e.errno == errno.ENOENT:
                exit("spark-submit is not found!")
            else:
                exit(str(e))
        exit(result)
def setup_package():
    """Run setuptools.setup via PyScaffold, registering integration_test.

    Sphinx is added to setup_requires only when a docs-related command is
    being invoked.
    """
    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
    sphinx = ['sphinx'] if needs_sphinx else []
    setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
          use_pyscaffold=True,
          cmdclass={
              "integration_test": IntegrationTestCommand
          })
# Script entry point.
if __name__ == "__main__":
    setup_package()
| {
"content_hash": "506a962266f604cf800943032c8e1c7d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 31.193548387096776,
"alnum_prop": 0.5739400206825233,
"repo_name": "FRosner/drunken-data-quality",
"id": "315d76aabe54d5d4d426a65341c4e37f1d691552",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60153"
},
{
"name": "Scala",
"bytes": "229012"
}
],
"symlink_target": ""
} |
from os.path import dirname
from ipkg.build import Formula, File
class e(Formula):
    """Test fixture formula 'e' 1.0, depending on formula 'd'."""
    name = 'e'
    version = '1.0'
    sources = File(dirname(__file__) + '/../../sources/e-1.0.tar.gz')
    platform = 'any'
    dependencies = ('d',)

    def install(self):
        # Intentionally a no-op: this fixture only exercises dependency
        # resolution, not actual installation.
        pass
| {
"content_hash": "53f8b7619b6b7d2365f4deca9215fde4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 17.3125,
"alnum_prop": 0.5740072202166066,
"repo_name": "pmuller/ipkg",
"id": "fd43720131a54ef9127b9098a96aff6fd582eccf",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data/formulas/e/e-1.0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139473"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make machine.toolbox_id optional (null/blank) while keeping it unique."""

    dependencies = [
        ('robocrm', '0045_machine_toolbox_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='machine',
            name='toolbox_id',
            field=models.PositiveIntegerField(default=None, blank=True, null=True, unique=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "78b403ab9bcae8b0fb067ff39525b497",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 96,
"avg_line_length": 24.42105263157895,
"alnum_prop": 0.6120689655172413,
"repo_name": "CMU-Robotics-Club/roboticsclub.org",
"id": "235d1474e6d49aef405c2fa1ac3ba3de048664b7",
"size": "488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "robocrm/migrations/0046_auto_20150311_1727.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4725"
},
{
"name": "HTML",
"bytes": "33977"
},
{
"name": "JavaScript",
"bytes": "5079"
},
{
"name": "Python",
"bytes": "249072"
}
],
"symlink_target": ""
} |
import web
import requests
import traceback
from datetime import datetime
from config import setting
from model import user
import urllib
from util import login
from util import oauth
import json
# Shared application singletons taken from the settings module.
config = setting.config
render = setting.render
db = setting.db
def common_check(post=[],get=[],need_login=True):
    """ request decorator """
    # Decorator factory for web.py handlers: validates the login state and
    # the presence of the required POST/GET parameter names, then calls the
    # handler with a ctx dict {"post", "get", "user"}.  All errors become
    # JSON {"code", "msg"} payloads.
    # NOTE(review): the [] defaults are never mutated here, but immutable
    # tuples would be the safer convention.
    def check(post,get):
        """ Check login state and required parameters. """
        post_data = web.input(_method="post")
        get_data = web.input(_method="get")
        user = None
        if need_login:
            user = login.logged()
            if not user:
                raise Exception(json.dumps({"code":403,"msg":"access deny"}))
        for k in post:
            if not k in post_data:
                raise Exception(json.dumps({"code":500,"msg":str(k)+" is required"}))
        for k in get:
            if not k in get_data:
                raise Exception(json.dumps({"code":500,"msg":str(k)+" is required"}))
        return {"post":post_data,"get":get_data,"user":user}
    def checkwrap(fn):
        def inner(self):
            try:
                ctx = check(post,get)
                return ok(msg=fn(self,ctx))
            except Exception, e:
                traceback.print_exc()
                # str(e) is already a JSON error payload (see raises above),
                # so the exception itself is returned as the response body.
                return e
        return inner
    return checkwrap
def ok(msg="ok"):
    """Serialize a success response (code 200) carrying *msg* as JSON."""
    payload = {"code": 200, "msg": msg}
    return json.dumps(payload)
def fail(msg="fail"):
    """Serialize a failure response (code 500) carrying *msg* as JSON.

    Fix: the original called json.dumbs (typo), which raised
    AttributeError on every invocation.
    """
    return json.dumps({"code": 500, "msg": msg})
def unfavpiece(pieceid,userid):
    """Delete the fav record linking *userid* to *pieceid*.

    Raises an Exception whose message is a JSON payload (code 300) when the
    user has not faved this piece.
    """
    where={"pieceid":pieceid,"userid":userid}
    row = db.select("fav",where="pieceid=$pieceid and userid=$userid",vars=where)
    if not row:
        raise Exception(json.dumps({"code":300,"msg":"you've not faved this piece"}))
    db.delete("fav",where="pieceid=$pieceid and userid=$userid",vars=where)
def favpiece(pieceid,userid):
    """Record that *userid* faved *pieceid*.

    Raises an Exception with a JSON message on every early exit: code 200
    with the existing fav id when the piece is already faved (a "success"
    code used as a control-flow shortcut), code 500 for an unknown piece id.
    """
    row = db.select("fav",where="pieceid=$pieceid and userid=$userid",vars={"pieceid":pieceid,"userid":userid})
    if row:
        raise Exception(json.dumps({"code":200,"msg":{"id":row[0]["id"]}}))
    piece = db.select("piece",where="id=$id",vars={"id":pieceid})
    if not piece:
        raise Exception(json.dumps({"code":500,"msg":"invalid piece id"}))
    db.insert("fav",pieceid=pieceid,userid=userid,addtime=datetime.now())
class add:
    @common_check(post=["content"])
    def POST(self,ctx):
        """ add one """
        # Create (or reuse) a piece from POST 'content', optionally share it
        # to the OAuth services named in 'share' (comma separated), and fav
        # it for the current user.  Returns {"id": pieceid}.
        content = ctx["post"]["content"]
        userid = ctx["user"]["id"]
        if "link" in ctx["post"]:
            link = ctx["post"]["link"]
        else:
            link = None
        pieces = db.select("piece",where="content=$content",vars={"content":content})
        # Reuse an existing piece with identical content instead of
        # inserting a duplicate.
        if not pieces:
            pieceid = db.insert("piece",content=content,user=userid,addtime=datetime.now(),link=link)
        else:
            pieceid = pieces[0]["id"]
        share = []
        if "share" in ctx["post"]:
            share = ctx["post"]["share"].split(",")
        for key in share:
            if not key:
                continue
            client = oauth.createClientWithName(key,ctx["user"])
            # Quote the content (CJK brackets) and append a permalink.
            post_content = u"「" + content + u"」" + " http://" + web.ctx.host + "/piece/" + str(pieceid)
            client.post(post_content)
        favpiece(pieceid,userid)
        return {"id":pieceid}
class fav:
    @common_check(post=["pieceid"])
    def POST(self,ctx):
        """ fav a piece """
        # Returns {"id": pieceid}; favpiece raises JSON-message exceptions
        # for "already faved" and "invalid piece id".
        pieceid=ctx["post"]["pieceid"]
        favpiece(pieceid,ctx["user"]["id"])
        return {"id":pieceid}
class userinfo:
    @common_check()
    def GET(self,ctx):
        # Return the logged-in user's public profile fields.
        user = ctx["user"]
        return {"name":user["name"],"id":user["id"],"avatar":user["avatar"]}
class myfavs:
    @common_check()
    def GET(self,ctx):
        # Paginated list of the current user's faved pieces, newest first.
        # GET params: 'per' (page size, default 5) and 'page' (1-based).
        input = web.input()
        id = ctx["user"]["id"]
        if "per" in input:
            # NOTE(review): 'per' arrives as a string and is passed to
            # LIMIT unconverted — confirm the db layer coerces it safely.
            per = input["per"] or 5
        else:
            per = 5
        try:
            page = int(web.input(page=1)["page"])
        except Exception, e:
            page = 1
        if page < 1:
            page = 1
        vars = {"id":id}
        where = "fav.userid=user.id and fav.pieceid=piece.id and user.id=$id"
        favs = db.select(["fav","piece","user"]
                ,what="avatar,piece.id,piece.content,fav.addtime"
                ,where=where
                ,vars=vars,limit=per
                ,offset=(page-1) * per
                ,order="addtime DESC")
        favs = list(favs)
        for item in favs:
            # Render timestamps as plain dates for the JSON response.
            item["addtime"] = item["addtime"].strftime('%Y-%m-%d')
        return favs
class authuser:
    @common_check(post=["name","access_token"],need_login=False)
    def POST(self,ctx):
        """Link or log in a user via an OAuth provider access token.

        POST fields: 'name' (provider name) and 'access_token'.
        If a user is already logged in, the provider identity is linked to
        that account and None is returned; otherwise the provider identity
        is looked up (or a new user created), logged in, and the user dict
        is returned with a 'client_hash' session token.
        Fix: removed leftover debug prints that leaked user data to stdout.
        """
        name = ctx["post"]["name"]
        access_token = ctx["post"]["access_token"]
        ret_user = None
        client_token = None
        cur_user = login.logged()
        client = oauth.createClientWithName(name)
        user_info = client.get_current_user_info(access_token)
        user_info["access_token"] = access_token
        if cur_user:
            # Already logged in: attach this provider identity to the
            # current account and refresh its token.
            user.update_oauth_userid(name,cur_user["id"],user_info["id"])
            user.update_access_token(name,user_info["id"],access_token)
        if not cur_user:
            oauth_user = user.exist_oauth_user(name,user_info)
            if not oauth_user:
                ret_user = user.new_oauth_user(name,user_info)
            else:
                ret_user = oauth_user
                user.update_access_token(name,oauth_user[name+"_id"],access_token)
            client_token = user.login_oauth_user(name,user_info)
            ret_user["client_hash"] = client_token
        return ret_user
class unfav:
    @common_check(post=["pieceid"])
    def POST(self,ctx):
        """ unfav a piece """
        # (Original docstring said "fav a piece" — copy/paste slip.)
        unfavpiece(ctx["post"]["pieceid"],ctx["user"]["id"])
        return
class pieces:
    def GET(self):
        """Return up to 100 random public pieces as a JSON list."""
        # NOTE(review): "order by rand()" sorts the whole table; fine for a
        # small data set, revisit if the piece table grows.
        rows = db.query('select id,content from piece where private = 0 order by rand() limit 100')
        return json.dumps(list(rows))
"content_hash": "630af8586d213e825a80a70327e245bb",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 111,
"avg_line_length": 29.54854368932039,
"alnum_prop": 0.5531460489567932,
"repo_name": "supersheep/huixiang",
"id": "e7f5386e4b4bc72a01e838d388f51d34670274a7",
"size": "6145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/ajax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18803"
},
{
"name": "JavaScript",
"bytes": "20420"
},
{
"name": "Python",
"bytes": "59008"
}
],
"symlink_target": ""
} |
'''
Simple RPC
Copyright (c) 2013, Joaquin G. Duo
'''
from simplerpc.expose_api.base.QueueCommandBase import QueueCommandBase
class ImagesBrowser(QueueCommandBase):
    """Exposed RPC command serving a hard-coded demo image catalog."""

    def getImagesList(self):
        """Return {image_id: {name, desc, url}} for 20 placeholder images."""
        return {
            img_id: dict(name='Image%03d' % img_id,
                         desc='Image %s made by John Doe' % img_id,
                         url='static/images/Image%03d.jpg' % img_id)
            for img_id in range(20)
        }
# Manual test hook: run the exposed-module auto tester when executed directly.
if __name__ == "__main__":
    from simplerpc.testing.exposed_api.ExposedModuleAutotester import ExposedModuleAutotester
#     ExposedModuleAutotester().createJsUnitTest(overwrite=True)
    ExposedModuleAutotester().autoTest()
| {
"content_hash": "5352904fa8f50a7d5735ec9b526b3109",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.6195402298850575,
"repo_name": "joaduo/python-simplerpc",
"id": "1a615178dfd752f2ac924ff26929f36557f60792",
"size": "894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_rpc/exposed_api/images/ImagesBrowser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "37891"
},
{
"name": "Python",
"bytes": "96545"
}
],
"symlink_target": ""
} |
from google.appengine.ext import ndb
from mcfw.properties import azzert
from mcfw.utils import Enum
from rogerthat.models import Image
from rogerthat.models.apps import EmbeddedApplication
from rogerthat.models.common import NdbModel
from rogerthat.rpc import users
class AssetTypes(Enum):
    """Kinds of payment assets a provider can expose (mcfw Enum, not stdlib)."""
    BANK = u'bank'
    ACCOUNT = u'account'
    CREDITCARD = u'creditcard'


class RequiredAction(Enum):
    """Actions a user may need to perform to activate a payment asset."""
    FOLLOW_URL = u'follow_url'
    ENTER_CODE = u'enter_code'
class PaymentOAuthSettings(ndb.Model):
    """OAuth2 client configuration for a payment provider."""
    client_id = ndb.StringProperty(indexed=False)
    secret = ndb.StringProperty(indexed=False)
    base_url = ndb.StringProperty(indexed=False)
    authorize_path = ndb.StringProperty(indexed=False)
    token_path = ndb.StringProperty(indexed=False)
    scope = ndb.StringProperty(indexed=False, default='')

    @property
    def authorize_url(self):
        # Full URL of the authorization endpoint.
        return self.base_url + self.authorize_path

    @property
    def token_url(self):
        # Full URL of the token endpoint.
        return self.base_url + self.token_path


class ConversionRatioValue(NdbModel):
    """Exchange rate of one currency against ConversionRatio.base."""
    currency = ndb.StringProperty()
    rate = ndb.FloatProperty()


class ConversionRatio(NdbModel):
    """Conversion table: rates of several currencies against a base currency."""
    base = ndb.StringProperty()
    values = ndb.LocalStructuredProperty(ConversionRatioValue, repeated=True)  # type: list[ConversionRatioValue]
class PaymentProvider(NdbModel):
    """
    A payment provider configuration (branding, OAuth, currencies, rates).

    Attributes:
        version (long): Implementation version, to check if the app supports this payment provider
        description (unicode): String containing markdown with an explanation on how to authorize this payment provider,
         used when clicking 'add payment provider'
    """
    name = ndb.StringProperty(indexed=False)
    logo_id = ndb.IntegerProperty(indexed=False)
    version = ndb.IntegerProperty(indexed=False)
    description = ndb.TextProperty()
    oauth_settings = ndb.LocalStructuredProperty(PaymentOAuthSettings)  # type: PaymentOAuthSettings
    background_color = ndb.StringProperty(indexed=False)
    text_color = ndb.StringProperty(indexed=False)
    button_color = ndb.StringProperty(indexed=False, choices=('light', 'primary', 'secondary', 'danger', 'dark'))
    black_white_logo_id = ndb.IntegerProperty(indexed=False)
    asset_types = ndb.StringProperty(indexed=False, repeated=True, choices=AssetTypes.all())
    currencies = ndb.StringProperty(indexed=False, repeated=True)
    settings = ndb.JsonProperty()
    embedded_application = ndb.KeyProperty(EmbeddedApplication)  # type: ndb.Key
    app_ids = ndb.StringProperty(repeated=True)
    conversion_ratio = ndb.LocalStructuredProperty(ConversionRatio)  # type: ConversionRatio

    @property
    def id(self):
        # Provider id is the datastore key name (py2 bytes -> unicode).
        return self.key.string_id().decode('utf8')

    def logo_url(self, base_url):
        # todo refactor to use gcs instead of abusing the datastore
        return Image.url(base_url, self.logo_id)

    def black_white_logo_url(self, base_url):
        # todo refactor to use gcs instead of abusing the datastore
        return Image.url(base_url, self.black_white_logo_id)

    @classmethod
    def create_key(cls, provider_id):
        return ndb.Key(cls, provider_id)

    @classmethod
    def list_by_app(cls, app_id):
        # Providers enabled for a given app.
        return cls.query().filter(cls.app_ids == app_id)

    def redirect_url(self, base_url):
        # OAuth2 redirect URI registered with this provider.
        return '%s/payments/callbacks/%s/oauth' % (base_url, self.id)

    def get_setting(self, setting):
        """Return a required entry from the settings JSON; raises ValueError
        when the settings dict or the entry is missing/falsy."""
        if not self.settings:
            raise ValueError('PaymentProvider %s settings is not set' % self.id)
        value = self.settings.get(setting)
        if not value:
            raise ValueError('PaymentProvider %s setting %s is not set' % (self.id, setting))
        return value

    def get_currency_rate(self, source_currency, target_currency):
        # type: (unicode, unicode) -> float
        """Rate converting source_currency into target_currency via the
        conversion table; the base currency implicitly has rate 1.0."""
        source_rate = target_rate = 0
        if source_currency == self.conversion_ratio.base:
            source_rate = 1.0
        if target_currency == self.conversion_ratio.base:
            target_rate = 1.0
        for rate in self.conversion_ratio.values:
            if rate.currency == source_currency:
                source_rate = rate.rate
            if rate.currency == target_currency:
                target_rate = rate.rate
        # A missing (or zero) rate on either side makes the ratio undefined.
        if not source_rate or not target_rate:
            raise Exception('Cannot calculate currency rate from %s to %s' % (source_currency, target_currency))
        return source_rate / target_rate
class PaymentOauthLoginState(ndb.Model):
    """Transient state for an in-progress OAuth login with a provider."""
    timestamp = ndb.IntegerProperty(indexed=False)
    provider_id = ndb.StringProperty(indexed=False)
    app_user = ndb.UserProperty(indexed=False)
    code = ndb.StringProperty(indexed=False)
    completed = ndb.BooleanProperty(indexed=False)

    @property
    def state(self):
        # The OAuth 'state' parameter doubles as the key name.
        return self.key.id().decode('utf8')

    @property
    def app_id(self):
        from rogerthat.utils.app import get_app_id_from_app_user
        return get_app_id_from_app_user(self.app_user)

    @classmethod
    def create_key(cls, state):
        return ndb.Key(cls, state)


class PaymentUserProvider(ndb.Model):
    """A provider linked to a user, with its OAuth token blob."""
    provider_id = ndb.StringProperty()
    token = ndb.JsonProperty()


class PaymentRequiredAction(ndb.Model):
    """Pending action the user must take before an asset becomes usable."""
    action = ndb.StringProperty()  # One of payment.consts.RequiredAction
    description = ndb.StringProperty(indexed=False)
    data = ndb.JsonProperty()


class PaymentUserAsset(ndb.Model):
    """A payment asset (bank/account/creditcard) owned by a user."""
    provider_id = ndb.StringProperty()
    asset_id = ndb.StringProperty()
    currency = ndb.StringProperty()
    type = ndb.StringProperty(choices=AssetTypes.all())
    required_action = ndb.StructuredProperty(PaymentRequiredAction)  # type: PaymentRequiredAction
class PaymentUser(ndb.Model):
    """Per-app-user payment state: linked providers and their assets."""
    providers = ndb.StructuredProperty(PaymentUserProvider, repeated=True)  # type: list[PaymentUserProvider]
    assets = ndb.StructuredProperty(PaymentUserAsset, repeated=True)  # type: list[PaymentUserAsset]

    @property
    def user(self):
        return users.User(self.key.string_id().decode('utf8'))

    def get_provider(self, provider_id):
        # Linear scan; provider lists are expected to be small.
        for pup in self.providers:
            if pup.provider_id == provider_id:
                return pup
        return None

    def has_provider(self, provider_id):
        if self.get_provider(provider_id):
            return True
        return False

    def has_asset(self, provider_id, asset_id):
        if not self.assets:
            return False
        for asset in self.assets:
            if asset.provider_id == provider_id:
                if asset.asset_id == asset_id:
                    return True
        return False

    def get_assets_by_provider(self, provider_id):
        """Return {asset_id: PaymentUserAsset} for one provider."""
        if not self.assets:
            return {}
        return {asset.asset_id: asset for asset in self.assets if asset.provider_id == provider_id}

    @classmethod
    def create_key(cls, app_user):
        from rogerthat.dal import parent_ndb_key
        return ndb.Key(cls, app_user.email(), parent=parent_ndb_key(app_user))

    @classmethod
    def list_by_provider_id(cls, provider_id):
        return cls.query(cls.providers.provider_id == provider_id)
class PaymentServiceProviderFee(ndb.Model):
    """Fee charged by a provider on a service's payments.

    NOTE(review): presumably 'amount' is a fixed-point value scaled by
    10**-precision in 'currency' — confirm against the payment logic.
    """
    amount = ndb.IntegerProperty(default=0)
    precision = ndb.IntegerProperty(default=2)
    min_amount = ndb.IntegerProperty(default=0)
    currency = ndb.StringProperty()


class PaymentServiceProvider(ndb.Model):
    """A payment provider enabled on a service, with fee and settings."""
    provider_id = ndb.StringProperty()
    enabled = ndb.BooleanProperty(default=True)
    fee = ndb.StructuredProperty(PaymentServiceProviderFee)  # type: PaymentServiceProviderFee
    settings = ndb.JsonProperty()


class PaymentService(ndb.Model):
    """Payment configuration of a service identity, with separate provider
    lists for live and test mode."""
    providers = ndb.StructuredProperty(PaymentServiceProvider, repeated=True)  # type: list[PaymentServiceProvider]
    test_providers = ndb.StructuredProperty(PaymentServiceProvider, repeated=True)  # type: list[PaymentServiceProvider]

    @property
    def service_identity_user(self):
        return users.User(self.key.string_id().decode('utf8'))

    def get_providers(self, test_mode=False):
        # Select the live or the test provider list.
        return self.test_providers if test_mode else self.providers

    def add_provider(self, psp, test_mode=False):
        self.get_providers(test_mode).append(psp)

    def get_provider(self, provider_id, test_mode=False):
        for psp in self.get_providers(test_mode):
            if psp.provider_id == provider_id:
                return psp
        return None

    def has_provider(self, provider_id, test_mode=False):
        if self.get_provider(provider_id, test_mode):
            return True
        return False

    def remove_provider(self, provider_id, test_mode=False):
        # Returns True when a provider was found and removed.
        provider = self.get_provider(provider_id, test_mode)
        if provider:
            self.get_providers(test_mode).remove(provider)
            return True
        return False

    @classmethod
    def create_key(cls, service_identity_user):
        from rogerthat.dal import parent_ndb_key_unsafe
        # Key name must be a service identity ("user/identity" form).
        azzert("/" in service_identity_user.email())
        return ndb.Key(cls, service_identity_user.email(), parent=parent_ndb_key_unsafe(service_identity_user))

    @classmethod
    def list_by_provider_id(cls, provider_id, test_mode=False):
        if test_mode:
            return cls.query(cls.test_providers.provider_id == provider_id)
        return cls.query(cls.providers.provider_id == provider_id)
class PaymentPendingReceive(ndb.Model):
    """A pending 'receive payment' transaction and its lifecycle status."""
    STATUS_CREATED = u"created"
    STATUS_SCANNED = u"scanned"
    STATUS_CANCELLED_BY_RECEIVER = u"cancelled_by_receiver"
    STATUS_CANCELLED_BY_PAYER = u"cancelled_by_payer"
    STATUS_FAILED = u"failed"
    STATUS_PENDING = u'pending'
    STATUS_SIGNATURE = u"signature"
    STATUS_CONFIRMED = u"confirmed"
    STATUSES = [STATUS_CREATED, STATUS_SCANNED, STATUS_CANCELLED_BY_RECEIVER, STATUS_CANCELLED_BY_PAYER, STATUS_FAILED,
                STATUS_PENDING, STATUS_SIGNATURE, STATUS_CONFIRMED]
    timestamp = ndb.IntegerProperty(indexed=False)
    provider_id = ndb.StringProperty(indexed=False)
    asset_id = ndb.StringProperty(indexed=False)
    app_user = ndb.UserProperty(indexed=False)  # the receiving user
    currency = ndb.StringProperty(indexed=False)
    amount = ndb.IntegerProperty(indexed=False)
    memo = ndb.StringProperty(indexed=False)
    precision = ndb.IntegerProperty(indexed=False)
    status = ndb.StringProperty(indexed=False, choices=STATUSES)
    pay_user = ndb.UserProperty(indexed=False)  # the paying user
    pay_asset_id = ndb.StringProperty(indexed=False)

    @property
    def transaction_id(self):
        # Transaction guid is the key name (py2 bytes -> unicode).
        return self.key.string_id().decode('utf8')

    @classmethod
    def create_key(cls, guid):
        return ndb.Key(cls, guid)
| {
"content_hash": "61fb69e53dd94a9c32c1504e45703ae5",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 120,
"avg_line_length": 35.98299319727891,
"alnum_prop": 0.6864542962472824,
"repo_name": "our-city-app/oca-backend",
"id": "34fe7eddcab0ce11dad27f042546659f93e2edcc",
"size": "11219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/models/payment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import json
import ipython_md
import potential_function
def parse_constants(constants):
    """Parse 'name:value' strings into a {name: float} dict.

    Returns an empty dict when *constants* is None.
    """
    if constants is None:
        return {}
    parsed = {}
    for item in constants:
        name, raw_value = item.split(':')
        parsed[name] = float(raw_value)
    return parsed
def parse_command_line_options():
    """Parse sys.argv with optparse.

    Returns [options, args], where args is the two-element list
    [INPUT_FILE, OUTPUT_FILE]. Exits the process with a usage message
    (via parser.error) when the two positional arguments are missing.
    """
    usage = "usage: %prog [options] INPUT_FILE OUTPUT_FILE"
    parser = OptionParser(usage)
    parser.add_option("-p", "--potential", dest="potential", help="choose which potential module to use (default is 'spring')", default="spring", type="string")
    parser.add_option("-s", "--step_size", dest="step_size", help="the time between each calculation (default is 0.1)", default=0.1, type="float")
    parser.add_option("-o", "--output_format", dest="output_format", help="Specify either CSV or JSON for the format of OUTPUT_FILE (default is csv)", default="csv", type="string")
    parser.add_option("-d", "--duration", dest="duration", help="Duration of the simulation (default is 1)", default=1, type="float")
    # action="append" collects repeated -c flags into a list for parse_constants().
    parser.add_option("-c", "--constant", dest="constants", help="use as many time as needed to specify constants as name:value (i.e -c epsilon:1.0 -c sigma:0.03)", action="append")
    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.error("You must specify an input file and an output file")
    return [options, args]
def load_formatter(module_name):
    """Import and return the output-formatter module named '<module_name>-format'.

    The hyphen makes the module name an invalid identifier, so it can only
    be imported via __import__. Raises ImportError when no such module
    exists.
    """
    # print as a function call works on both Python 2 and 3; the original
    # print-statement form was Python-2-only syntax.
    print("Loading %s output formatter..." % (module_name))
    return __import__(module_name + "-format")
def load_particle_data(filename):
    """Load and return the particle description parsed from the JSON file *filename*.

    Raises IOError/OSError if the file cannot be opened and ValueError
    (json.JSONDecodeError on Python 3) if it is not valid JSON.
    """
    print("Loading particle data from %s" % (filename))
    # 'with' guarantees the file handle is closed even if json.load raises.
    with open(filename) as input_file:
        return json.load(input_file)
def save_output(results, formatter, filename):
    """Format *results* with *formatter* and write them to *filename*.

    *formatter* must provide a format(results) method returning a string
    (see load_formatter). The output file is overwritten.
    """
    print("Saving results to %s" % (filename))
    formatted_results = formatter.format(results)
    # 'with' guarantees the file is flushed and closed even on error.
    with open(filename, 'w') as output_file:
        output_file.write(formatted_results)
def main():
    """Drive one simulation run: parse the CLI, load inputs, simulate, save."""
    options, args = parse_command_line_options()
    input_path, output_path = args
    constant_map = parse_constants(options.constants)
    potential_fn = potential_function.load_potential(options.potential, constant_map)
    formatter = load_formatter(options.output_format)
    particles = load_particle_data(input_path)
    simulation_results = ipython_md.main(options.step_size, options.duration, potential_fn, particles)
    save_output(simulation_results, formatter, output_path)
if __name__ == "__main__":
main() | {
"content_hash": "1a549ff7f81ba987a872537041bccdf9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 190,
"avg_line_length": 48.839285714285715,
"alnum_prop": 0.6552102376599634,
"repo_name": "olcf/iPython-Molecular-Dynamics",
"id": "c3e263a2370f9a32f84bbf821c1549ccee78ecbe",
"size": "2792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10699"
}
],
"symlink_target": ""
} |
from flask import Flask
# NOTE(review): the 'flask.ext.*' namespace was removed in modern Flask;
# this import works only on old versions -- migrate to 'flask_sqlalchemy'.
from flask.ext.sqlalchemy import SQLAlchemy
#from config import *
app = Flask(__name__)
app.config.from_object('config')   # settings come from the top-level 'config' module
db = SQLAlchemy(app)
# Force SQLAlchemy's pool to hand each thread its own connection.
# NOTE(review): pokes a private attribute; may break across SQLAlchemy versions.
db.engine.pool._use_threadlocal = True
# Imported at the bottom -- presumably so views/models can import 'app'
# without a circular-import crash, and for their registration side effects.
from app import views, models
def register_routes(app):
    """Attach the application's blueprints to *app* and return it."""
    # NOTE(review): 'front' is imported but never used below -- presumably
    # imported for its side effects (e.g. decorator-based registration);
    # verify before removing.
    from .views import front, asset
    app.register_blueprint(asset.bp, url_prefix='/asset')
    return app
register_routes(app)
# Create any tables that do not exist yet, at import time.
db.create_all()

# Logging: outside debug mode, write INFO-level records to a rotating
# log file (1 MB per file, up to 10 backups).
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/dlop.log', 'a', 1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('dlop logging startup')
| {
"content_hash": "efe74dbd7c432ce4111f4adc34b18a46",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 119,
"avg_line_length": 29.566666666666666,
"alnum_prop": 0.7260428410372041,
"repo_name": "spark8103/dlop2",
"id": "3a08ce29d5a24326c80f35d2c9ff64470d2d7c3b",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "123429"
},
{
"name": "JavaScript",
"bytes": "660178"
},
{
"name": "Python",
"bytes": "29280"
}
],
"symlink_target": ""
} |
import os
import unittest
from prudentia.domain import Box
from prudentia.vagrant import VagrantProvider, VagrantExt
from prudentia.simple import SimpleProvider
class TestVagrantProvider(unittest.TestCase):
    """Checks that adding/removing a box rewrites the generated Vagrantfile."""

    def setUp(self):
        self.tests_path = os.path.dirname(os.path.realpath(__file__))
        self.provider = VagrantProvider()

    def test_create_tasks_box(self):
        vagrant_ext = VagrantExt()
        vagrant_ext.set_mem(1024)
        vagrant_ext.set_shares([])
        vagrant_ext.set_image('img')
        vagrant_ext.set_provider('provider')
        playbook = self.tests_path + '/../examples/boxes/tasks.yml'
        test_box = Box('vagrant-testbox', playbook, 'tasks-host', '10.10.0.23',
                       VagrantProvider.DEFAULT_USER, VagrantProvider.DEFAULT_PWD, vagrant_ext)
        # Go through SimpleProvider directly: VagrantProvider.add_box would
        # also shell out to Vagrant, and only the generated Vagrantfile
        # matters for this test.
        SimpleProvider.add_box(self.provider, test_box)
        self.provider._generate_vagrant_file()
        self.assertIn('vm.define', self._read_vagrant_file())
        SimpleProvider.remove_box(self.provider, test_box)
        self.provider._generate_vagrant_file()
        self.assertNotIn('vm.define', self._read_vagrant_file())

    def _read_vagrant_file(self):
        # Flatten the file to one line so substring checks ignore layout.
        with open(VagrantProvider.CONF_FILE, "r") as conf:
            return conf.read().replace('\n', '')
| {
"content_hash": "a6a43ba6a64b9b35656a39665100e9e7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 37.111111111111114,
"alnum_prop": 0.6616766467065869,
"repo_name": "StarterSquad/prudentia",
"id": "115bd966114e117e8d429331f0ead8ad06deddb5",
"size": "1336",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/vagrant_provider_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87577"
},
{
"name": "Shell",
"bytes": "4010"
}
],
"symlink_target": ""
} |
"""
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.right(25), and it rotates in-place 25 degrees clockwise.
By combining together these and similar commands, intricate shapes and
pictures can easily be drawn.
----- turtle.py
This module is an extended reimplementation of turtle.py from the
Python standard distribution up to Python 2.5. (See: http://www.python.org)
It tries to keep the merits of turtle.py and to be (nearly) 100%
compatible with it. This means in the first place to enable the
learning programmer to use all the commands, classes and methods
interactively when using the module from within IDLE run with
the -n switch.
Roughly it has the following features added:
- Better animation of the turtle movements, especially of turning the
turtle. So the turtles can more easily be used as a visual feedback
instrument by the (beginning) programmer.
- Different turtle shapes, gif-images as turtle shapes, user defined
and user controllable turtle shapes, among them compound
(multicolored) shapes. Turtle shapes can be stretched and tilted, which
makes turtles very versatile geometrical objects.
- Fine control over turtle movement and screen updates via delay(),
and enhanced tracer() and speed() methods.
- Aliases for the most commonly used commands, like fd for forward etc.,
following the early Logo traditions. This reduces the boring work of
typing long sequences of commands, which often occur in a natural way
when kids try to program fancy pictures on their first encounter with
turtle graphics.
- Turtles now have an undo()-method with configurable undo-buffer.
- Some simple commands/methods for creating event driven programs
(mouse-, key-, timer-events). Especially useful for programming games.
- A scrollable Canvas class. The default scrollable Canvas can be
extended interactively as needed while playing around with the turtle(s).
- A TurtleScreen class with methods controlling background color or
background image, window and canvas size and other properties of the
TurtleScreen.
- There is a method, setworldcoordinates(), to install a user defined
coordinate-system for the TurtleScreen.
- The implementation uses a 2-vector class named Vec2D, derived from tuple.
This class is public, so it can be imported by the application programmer,
which makes certain types of computations very natural and compact.
- Appearance of the TurtleScreen and the Turtles at startup/import can be
configured by means of a turtle.cfg configuration file.
The default configuration mimics the appearance of the old turtle module.
- If configured appropriately the module reads in docstrings from a docstring
dictionary in some different language, supplied separately and replaces
the English ones by those read in. There is a utility function
write_docstringdict() to write a dictionary with the original (English)
docstrings to disc, so it can serve as a template for translations.
Behind the scenes there are some features included with possible
extensions in mind. These will be commented and documented elsewhere.
"""
_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 2009"
# print(_ver)
import tkinter as TK
import types
import math
import time
import inspect
import sys

from os.path import isfile, split, join
from copy import deepcopy
from tkinter import simpledialog

# Names re-exported through __all__, grouped by where they live:
_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
               'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
        'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
        'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
        'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
        'register_shape', 'resetscreen', 'screensize', 'setup',
        'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
        'window_height', 'window_width']
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
        'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
        'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
        'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
        'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
        'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
        'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
        'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
        'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
        'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
        'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
        'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
        'write', 'xcor', 'ycor']
_tg_utilities = ['write_docstringdict', 'done']

__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
           _tg_utilities + ['Terminator']) # + _math_functions)

# Short-form command names kept for Logo tradition (fd, bk, lt, rt, ...).
_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
               'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
               'turtlesize', 'up', 'width']

# Default configuration; may be overridden by a turtle.cfg file
# (see config_dict / readconfig below).
_CFG = {"width" : 0.5,               # Screen
        "height" : 0.75,
        "canvwidth" : 400,
        "canvheight": 300,
        "leftright": None,
        "topbottom": None,
        "mode": "standard",          # TurtleScreen
        "colormode": 1.0,
        "delay": 10,
        "undobuffersize": 1000,      # RawTurtle
        "shape": "classic",
        "pencolor" : "black",
        "fillcolor" : "black",
        "resizemode" : "noresize",
        "visible" : True,
        "language": "english",        # docstrings
        "exampleturtle": "turtle",
        "examplescreen": "screen",
        "title": "Python Turtle Graphics",
        "using_IDLE": False
       }
def config_dict(filename):
    """Parse a turtle config file and return its content as a dictionary.

    Each non-blank, non-comment line must read ``key = value``. Values
    that look like numbers are converted to int/float; the literal tokens
    True/False/None and empty quoted strings map to their Python values;
    everything else stays a string. Malformed lines are reported and
    skipped.
    """
    # Map the few recognized literal tokens directly instead of eval()-ing
    # text read from a file.
    _literals = {"True": True, "False": False, "None": None, "''": "", '""': ""}
    with open(filename, "r") as f:
        cfglines = f.readlines()
    cfgdict = {}
    for line in cfglines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        try:
            # Split on the first '=' only, so values may themselves
            # contain '=' (the old unbounded split rejected such lines).
            key, value = line.split("=", 1)
        except ValueError:
            print("Bad line in config-file %s:\n%s" % (filename, line))
            continue
        key = key.strip()
        value = value.strip()
        if value in _literals:
            value = _literals[value]
        else:
            try:
                if "." in value:
                    value = float(value)
                else:
                    value = int(value)
            except ValueError:
                pass  # value need not be converted
        cfgdict[key] = value
    return cfgdict
def readconfig(cfgdict):
    """Read config-files, change configuration-dict accordingly.

    If there is a turtle.cfg file in the current working directory,
    read it from there. If this contains an importconfig-value,
    say 'myway', construct filename turtle_myway.cfg else use
    turtle.cfg and read it from the import-directory, where
    turtle.py is located.
    Update configuration dictionary first according to config-file,
    in the import directory, then according to config-file in the
    current working directory.
    If no config-file is found, the default configuration is used.
    """
    default_cfg = "turtle.cfg"
    cfgdict1 = {}
    cfgdict2 = {}
    if isfile(default_cfg):
        cfgdict1 = config_dict(default_cfg)
        if "importconfig" in cfgdict1:
            default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
    try:
        head, tail = split(__file__)
        cfg_file2 = join(head, default_cfg)
    except:
        # __file__ may be unavailable (e.g. frozen apps); then there is
        # no import-directory config to read.
        cfg_file2 = ""
    if isfile(cfg_file2):
        cfgdict2 = config_dict(cfg_file2)
    # NOTE(review): the cfgdict parameter is never used -- the module
    # global _CFG is updated directly (the only caller passes _CFG anyway).
    _CFG.update(cfgdict2)
    _CFG.update(cfgdict1)
# Import-time side effect: merge settings from any turtle.cfg on disk into
# _CFG. Deliberately best-effort -- any failure just leaves the defaults.
try:
    readconfig(_CFG)
except:
    print ("No configfile read, reason unknown")
class Vec2D(tuple):
    """A two-dimensional vector, implemented as an immutable (x, y) tuple.

    Helper class for turtle graphics, but usable on its own.
    For vectors a, b and a number k it provides:
        a + b   vector addition
        a - b   vector subtraction
        a * b   inner product
        k * a and a * k   multiplication with scalar
        |a|     absolute value (Euclidean length)
        a.rotate(angle)   rotation (counterclockwise, degrees)
    """
    def __new__(cls, x, y):
        return tuple.__new__(cls, (x, y))

    def __add__(self, other):
        ax, ay = self
        return Vec2D(ax + other[0], ay + other[1])

    def __mul__(self, other):
        ax, ay = self
        if isinstance(other, Vec2D):
            # vector * vector -> inner (dot) product
            return ax * other[0] + ay * other[1]
        # vector * scalar -> scaled vector
        return Vec2D(ax * other, ay * other)

    def __rmul__(self, other):
        # scalar * vector; any other left operand silently yields None
        # (behavior kept identical to the original implementation)
        if isinstance(other, (int, float)):
            return Vec2D(self[0] * other, self[1] * other)

    def __sub__(self, other):
        ax, ay = self
        return Vec2D(ax - other[0], ay - other[1])

    def __neg__(self):
        x, y = self
        return Vec2D(-x, -y)

    def __abs__(self):
        x, y = self
        return (x * x + y * y) ** 0.5

    def rotate(self, angle):
        """Return self rotated counterclockwise by *angle* degrees."""
        x, y = self
        rad = angle * math.pi / 180.0
        c, s = math.cos(rad), math.sin(rad)
        # standard 2D rotation matrix applied to (x, y)
        return Vec2D(x * c - y * s, y * c + x * s)

    def __getnewargs__(self):
        # support copying/pickling through tuple.__new__
        return (self[0], self[1])

    def __repr__(self):
        return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
## helper functions for Scrolled Canvas, to forward Canvas-methods
## to ScrolledCanvas class
def __methodDict(cls, _dict):
    """helper function for Scrolled Canvas

    Collect every plain function defined on *cls* or its ancestors into
    *_dict* (name -> function). Base classes are visited first, so
    definitions nearer to *cls* overwrite inherited ones.
    """
    baseList = list(cls.__bases__)
    baseList.reverse()
    for _super in baseList:
        __methodDict(_super, _dict)
    for key, value in cls.__dict__.items():
        if type(value) == types.FunctionType:
            _dict[key] = value
def __methods(cls):
    """helper function for Scrolled Canvas

    Return the names of all methods reachable on *cls* (including
    inherited ones), as gathered by __methodDict.
    """
    _dict = {}
    __methodDict(cls, _dict)
    return _dict.keys()
# Template used by __forwardmethods to generate one delegating method;
# filled in with %(method)s / %(attribute)s below.
__stringBody = (
    'def %(method)s(self, *args, **kw): return ' +
    'self.%(attribute)s.%(method)s(*args, **kw)')

def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
    ### MANY CHANGES ###
    # For every public method of toClass that fromClass does not already
    # provide (and that is not excluded), attach to fromClass a generated
    # method delegating to the instance attribute named by toPart.
    _dict_1 = {}
    __methodDict(toClass, _dict_1)
    _dict = {}
    mfc = __methods(fromClass)
    for ex in _dict_1.keys():
        if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc:
            pass
        else:
            _dict[ex] = _dict_1[ex]

    for method, func in _dict.items():
        d = {'method': method, 'func': func}
        if isinstance(toPart, str):
            execString = \
                __stringBody % {'method' : method, 'attribute' : toPart}
        # exec defines the delegating function inside d, from where it is
        # attached to fromClass.
        exec(execString, d)
        setattr(fromClass, method, d[method])   ### NEWU!
class ScrolledCanvas(TK.Frame):
    """Modeled after the scrolled canvas class from Grayson's Tkinter book.

    Used as the default canvas, which pops up automatically when
    using turtle graphics functions or the Turtle class.
    """
    def __init__(self, master, width=500, height=350,
                                          canvwidth=600, canvheight=500):
        TK.Frame.__init__(self, master, width=width, height=height)
        self._rootwindow = self.winfo_toplevel()
        self.width, self.height = width, height
        self.canvwidth, self.canvheight = canvwidth, canvheight
        self.bg = "white"
        self._canvas = TK.Canvas(master, width=width, height=height,
                                 bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
        self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
                                    orient=TK.HORIZONTAL)
        self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
        self._canvas.configure(xscrollcommand=self.hscroll.set,
                               yscrollcommand=self.vscroll.set)
        self.rowconfigure(0, weight=1, minsize=0)
        self.columnconfigure(0, weight=1, minsize=0)
        # Canvas top-left, scrollbars on the right and bottom edges.
        self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                column=1, rowspan=1, columnspan=1, sticky='news')
        self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.reset()
        self._rootwindow.bind('<Configure>', self.onResize)

    def reset(self, canvwidth=None, canvheight=None, bg = None):
        """Adjust canvas and scrollbars according to given canvas size."""
        if canvwidth:
            self.canvwidth = canvwidth
        if canvheight:
            self.canvheight = canvheight
        if bg:
            self.bg = bg
        # Scrollregion is centered on (0, 0) -- turtle's coordinate origin.
        self._canvas.config(bg=bg,
                        scrollregion=(-self.canvwidth//2, -self.canvheight//2,
                                       self.canvwidth//2, self.canvheight//2))
        self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
                                                               self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
                                                              self.canvheight)
        self.adjustScrolls()

    def adjustScrolls(self):
        """ Adjust scrollbars according to window- and canvas-size.
        """
        cwidth = self._canvas.winfo_width()
        cheight = self._canvas.winfo_height()
        self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
        # Show the scrollbars only when the canvas is larger than the window.
        if cwidth < self.canvwidth or cheight < self.canvheight:
            self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                              column=0, rowspan=1, columnspan=1, sticky='news')
            self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                              column=1, rowspan=1, columnspan=1, sticky='news')
        else:
            self.hscroll.grid_forget()
            self.vscroll.grid_forget()

    def onResize(self, event):
        """self-explanatory"""
        self.adjustScrolls()

    def bbox(self, *args):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.bbox(*args)

    def cget(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.cget(*args, **kwargs)

    def config(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.config(*args, **kwargs)

    def bind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.bind(*args, **kwargs)

    def unbind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.unbind(*args, **kwargs)

    def focus_force(self):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.focus_force()

# Generate delegating wrappers for all remaining public TK.Canvas methods
# not defined explicitly above.
__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
class _Root(TK.Tk):
    """Root class for Screen based on Tkinter."""
    def __init__(self):
        TK.Tk.__init__(self)

    def setupcanvas(self, width, height, cwidth, cheight):
        # Embed a ScrolledCanvas filling the whole toplevel window.
        self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
        self._canvas.pack(expand=1, fill="both")

    def _getcanvas(self):
        return self._canvas

    def set_geometry(self, width, height, startx, starty):
        self.geometry("%dx%d%+d%+d"%(width, height, startx, starty))

    def ondestroy(self, destroy):
        # Run *destroy* when the user closes the window.
        self.wm_protocol("WM_DELETE_WINDOW", destroy)

    def win_width(self):
        return self.winfo_screenwidth()

    def win_height(self):
        return self.winfo_screenheight()

Canvas = TK.Canvas
class TurtleScreenBase(object):
    """Provide the basic graphics functionality.
       Interface between Tkinter and turtle.py.

       To port turtle.py to some different graphics toolkit
       a corresponding TurtleScreenBase class has to be implemented.
    """

    @staticmethod
    def _blankimage():
        """return a blank image object
        """
        img = TK.PhotoImage(width=1, height=1)
        img.blank()
        return img

    @staticmethod
    def _image(filename):
        """return an image object containing the
        imagedata from a gif-file named filename.
        """
        return TK.PhotoImage(file=filename)

    def __init__(self, cv):
        self.cv = cv
        if isinstance(cv, ScrolledCanvas):
            w = self.cv.canvwidth
            h = self.cv.canvheight
        else:  # expected: ordinary TK.Canvas
            w = int(self.cv.cget("width"))
            h = int(self.cv.cget("height"))
            # Center the scrollregion on (0, 0), turtle's origin.
            self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
        self.canvwidth = w
        self.canvheight = h
        self.xscale = self.yscale = 1.0

    def _createpoly(self):
        """Create an invisible polygon item on canvas self.cv)
        """
        return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")

    def _drawpoly(self, polyitem, coordlist, fill=None,
                  outline=None, width=None, top=False):
        """Configure polygonitem polyitem according to provided
        arguments:
        coordlist is sequence of coordinates
        fill is filling color
        outline is outline color
        top is a boolean value, which specifies if polyitem
        will be put on top of the canvas' displaylist so it
        will not be covered by other items.
        """
        cl = []
        # Canvas y-axis points down, turtle's points up -- hence -y.
        for x, y in coordlist:
            cl.append(x * self.xscale)
            cl.append(-y * self.yscale)
        self.cv.coords(polyitem, *cl)
        if fill is not None:
            self.cv.itemconfigure(polyitem, fill=fill)
        if outline is not None:
            self.cv.itemconfigure(polyitem, outline=outline)
        if width is not None:
            self.cv.itemconfigure(polyitem, width=width)
        if top:
            self.cv.tag_raise(polyitem)

    def _createline(self):
        """Create an invisible line item on canvas self.cv)
        """
        return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
                                   capstyle = TK.ROUND)

    def _drawline(self, lineitem, coordlist=None,
                  fill=None, width=None, top=False):
        """Configure lineitem according to provided arguments:
        coordlist is sequence of coordinates
        fill is drawing color
        width is width of drawn line.
        top is a boolean value, which specifies if polyitem
        will be put on top of the canvas' displaylist so it
        will not be covered by other items.
        """
        if coordlist is not None:
            cl = []
            for x, y in coordlist:
                cl.append(x * self.xscale)
                cl.append(-y * self.yscale)
            self.cv.coords(lineitem, *cl)
        if fill is not None:
            self.cv.itemconfigure(lineitem, fill=fill)
        if width is not None:
            self.cv.itemconfigure(lineitem, width=width)
        if top:
            self.cv.tag_raise(lineitem)

    def _delete(self, item):
        """Delete graphics item from canvas.
        If item is"all" delete all graphics items.
        """
        self.cv.delete(item)

    def _update(self):
        """Redraw graphics items on canvas
        """
        self.cv.update()

    def _delay(self, delay):
        """Delay subsequent canvas actions for delay ms."""
        self.cv.after(delay)

    def _iscolorstring(self, color):
        """Check if the string color is a legal Tkinter color string.
        """
        try:
            # winfo_rgb raises TclError for unknown color names; the rgb
            # value itself is not needed, only whether the call succeeds.
            rgb = self.cv.winfo_rgb(color)
            ok = True
        except TK.TclError:
            ok = False
        return ok

    def _bgcolor(self, color=None):
        """Set canvas' backgroundcolor if color is not None,
        else return backgroundcolor."""
        if color is not None:
            self.cv.config(bg = color)
            self._update()
        else:
            return self.cv.cget("bg")

    def _write(self, pos, txt, align, font, pencolor):
        """Write txt at pos in canvas with specified font
        and color.
        Return text item and x-coord of right bottom corner
        of text's bounding box."""
        x, y = pos
        x = x * self.xscale
        y = y * self.yscale
        anchor = {"left":"sw", "center":"s", "right":"se" }
        item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
                                        fill = pencolor, font = font)
        x0, y0, x1, y1 = self.cv.bbox(item)
        self.cv.update()
        return item, x1-1

##    def _dot(self, pos, size, color):
##        """may be implemented for some other graphics toolkit"""

    def _onclick(self, item, fun, num=1, add=None):
        """Bind fun to mouse-click event on turtle.
        fun must be a function with two arguments, the coordinates
        of the clicked point on the canvas.
        num, the number of the mouse-button defaults to 1
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button-%s>" % num)
        else:
            def eventfun(event):
                # Translate window pixels back into turtle coordinates.
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.tag_bind(item, "<Button-%s>" % num, eventfun, add)

    def _onrelease(self, item, fun, num=1, add=None):
        """Bind fun to mouse-button-release event on turtle.
        fun must be a function with two arguments, the coordinates
        of the point on the canvas where mouse button is released.
        num, the number of the mouse-button defaults to 1

        If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
        else:
            def eventfun(event):
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
                             eventfun, add)

    def _ondrag(self, item, fun, num=1, add=None):
        """Bind fun to mouse-move-event (with pressed mouse button) on turtle.
        fun must be a function with two arguments, the coordinates of the
        actual mouse position on the canvas.
        num, the number of the mouse-button defaults to 1

        Every sequence of mouse-move-events on a turtle is preceded by a
        mouse-click event on that turtle.
        """
        if fun is None:
            self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
        else:
            def eventfun(event):
                try:
                    x, y = (self.cv.canvasx(event.x)/self.xscale,
                            -self.cv.canvasy(event.y)/self.yscale)
                    fun(x, y)
                except:
                    # Deliberately best-effort: errors in user callbacks
                    # must not kill the Tk event loop.
                    pass
            self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)

    def _onscreenclick(self, fun, num=1, add=None):
        """Bind fun to mouse-click event on canvas.
        fun must be a function with two arguments, the coordinates
        of the clicked point on the canvas.
        num, the number of the mouse-button defaults to 1

        If a turtle is clicked, first _onclick-event will be performed,
        then _onscreenclick-event.
        """
        if fun is None:
            self.cv.unbind("<Button-%s>" % num)
        else:
            def eventfun(event):
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            self.cv.bind("<Button-%s>" % num, eventfun, add)

    def _onkeyrelease(self, fun, key):
        """Bind fun to key-release event of key.
        Canvas must have focus. See method listen
        """
        if fun is None:
            self.cv.unbind("<KeyRelease-%s>" % key, None)
        else:
            def eventfun(event):
                fun()
            self.cv.bind("<KeyRelease-%s>" % key, eventfun)

    def _onkeypress(self, fun, key=None):
        """If key is given, bind fun to key-press event of key.
        Otherwise bind fun to any key-press.
        Canvas must have focus. See method listen.
        """
        if fun is None:
            if key is None:
                self.cv.unbind("<KeyPress>", None)
            else:
                self.cv.unbind("<KeyPress-%s>" % key, None)
        else:
            def eventfun(event):
                fun()
            if key is None:
                self.cv.bind("<KeyPress>", eventfun)
            else:
                self.cv.bind("<KeyPress-%s>" % key, eventfun)

    def _listen(self):
        """Set focus on canvas (in order to collect key-events)
        """
        self.cv.focus_force()

    def _ontimer(self, fun, t):
        """Install a timer, which calls fun after t milliseconds.
        """
        if t == 0:
            self.cv.after_idle(fun)
        else:
            self.cv.after(t, fun)

    def _createimage(self, image):
        """Create and return image item on canvas.
        """
        return self.cv.create_image(0, 0, image=image)

    def _drawimage(self, item, pos, image):
        """Configure image item as to draw image object
        at position (x,y) on canvas)
        """
        x, y = pos
        self.cv.coords(item, (x * self.xscale, -y * self.yscale))
        self.cv.itemconfig(item, image=image)

    def _setbgpic(self, item, image):
        """Configure image item as to draw image object
        at center of canvas. Set item to the first item
        in the displaylist, so it will be drawn below
        any other item ."""
        self.cv.itemconfig(item, image=image)
        self.cv.tag_lower(item)

    def _type(self, item):
        """Return 'line' or 'polygon' or 'image' depending on
        type of item.
        """
        return self.cv.type(item)

    def _pointlist(self, item):
        """returns list of coordinate-pairs of points of item
        Example (for insiders):
        >>> from turtle import *
        >>> getscreen()._pointlist(getturtle().turtle._item)
        [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982),
        (9.9999999999999982, 0.0)]
        >>> """
        cl = self.cv.coords(item)
        # Flip y back from canvas orientation to turtle orientation.
        pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)]
        return  pl

    def _setscrollregion(self, srx1, sry1, srx2, sry2):
        self.cv.config(scrollregion=(srx1, sry1, srx2, sry2))

    def _rescale(self, xscalefactor, yscalefactor):
        # Rescale the coordinates of every item already on the canvas.
        items = self.cv.find_all()
        for item in items:
            coordinates = list(self.cv.coords(item))
            newcoordlist = []
            while coordinates:
                x, y = coordinates[:2]
                newcoordlist.append(x * xscalefactor)
                newcoordlist.append(y * yscalefactor)
                coordinates = coordinates[2:]
            self.cv.coords(item, *newcoordlist)

    def _resize(self, canvwidth=None, canvheight=None, bg=None):
        """Resize the canvas the turtles are drawing on. Does
        not alter the drawing window.
        """
        # needs amendment
        if not isinstance(self.cv, ScrolledCanvas):
            return self.canvwidth, self.canvheight
        if canvwidth is canvheight is bg is None:
            return self.cv.canvwidth, self.cv.canvheight
        if canvwidth is not None:
            self.canvwidth = canvwidth
        if canvheight is not None:
            self.canvheight = canvheight
        self.cv.reset(canvwidth, canvheight, bg)

    def _window_size(self):
        """ Return the width and height of the turtle window.
        """
        width = self.cv.winfo_width()
        if width <= 1:  # the window isn't managed by a geometry manager
            width = self.cv['width']
        height = self.cv.winfo_height()
        if height <= 1: # the window isn't managed by a geometry manager
            height = self.cv['height']
        return width, height

    def mainloop(self):
        """Starts event loop - calling Tkinter's mainloop function.

        No argument.

        Must be last statement in a turtle graphics program.
        Must NOT be used if a script is run from within IDLE in -n mode
        (No subprocess) - for interactive use of turtle graphics.

        Example (for a TurtleScreen instance named screen):
        >>> screen.mainloop()
        """
        TK.mainloop()

    def textinput(self, title, prompt):
        """Pop up a dialog window for input of a string.

        Arguments: title is the title of the dialog window,
        prompt is a text mostly describing what information to input.

        Return the string input
        If the dialog is canceled, return None.

        Example (for a TurtleScreen instance named screen):
        >>> screen.textinput("NIM", "Name of first player:")
        """
        return simpledialog.askstring(title, prompt)

    def numinput(self, title, prompt, default=None, minval=None, maxval=None):
        """Pop up a dialog window for input of a number.

        Arguments: title is the title of the dialog window,
        prompt is a text mostly describing what numerical information to input.
        default: default value
        minval: minimum value for input
        maxval: maximum value for input

        The number input must be in the range minval .. maxval if these are
        given. If not, a hint is issued and the dialog remains open for
        correction. Return the number input.
        If the dialog is canceled, return None.

        Example (for a TurtleScreen instance named screen):
        >>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
        """
        return simpledialog.askfloat(title, prompt, initialvalue=default,
                                     minvalue=minval, maxvalue=maxval)
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator(Exception):
    """Raised in TurtleScreen.update when _RUNNING becomes False.

    Stops execution of a turtle graphics script; mainly used by the
    turtle demo viewer.
    """
class TurtleGraphicsError(Exception):
    """Generic error raised by turtle graphics operations."""
    pass
class Shape(object):
    """Data structure modeling shapes.

    attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type - a polygon-tuple,
    an image or a list constructed using the addcomponent method.
    """
    def __init__(self, type_, data=None):
        if type_ == "polygon":
            if isinstance(data, list):
                data = tuple(data)
        elif type_ == "image":
            if isinstance(data, str) and data.lower().endswith(".gif") and isfile(data):
                data = TurtleScreen._image(data)
            # otherwise data is assumed to already be a PhotoImage
        elif type_ == "compound":
            data = []   # components are appended later via addcomponent()
        else:
            raise TurtleGraphicsError("There is no shape type %s" % type_)
        self._type = type_
        self._data = data

    def addcomponent(self, poly, fill, outline=None):
        """Add component to a shape of type compound.

        Arguments: poly is a polygon, i. e. a tuple of number pairs.
        fill is the fillcolor of the component,
        outline is the outline color of the component (defaults to fill).

        call (for a Shape object named s):
        -- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")

        Example:
        >>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
        >>> s = Shape("compound")
        >>> s.addcomponent(poly, "red", "blue")
        >>> # .. add more components and then use register_shape()
        """
        if self._type != "compound":
            raise TurtleGraphicsError("Cannot add component to %s Shape"
                                      % self._type)
        self._data.append([poly, fill, fill if outline is None else outline])
class Tbuffer(object):
    """Ring buffer serving as undo buffer for RawTurtle objects."""

    def __init__(self, bufsize=10):
        self.bufsize = bufsize
        # Every empty slot holds the sentinel list [None].
        self.buffer = [[None]] * bufsize
        self.ptr = -1
        self.cumulate = False

    def reset(self, bufsize=None):
        """Blank out all slots; with an argument, also resize the buffer."""
        if bufsize is None:
            for idx in range(self.bufsize):
                self.buffer[idx] = [None]
        else:
            self.bufsize = bufsize
            self.buffer = [[None]] * bufsize
        self.ptr = -1

    def push(self, item):
        if self.bufsize <= 0:
            return
        if self.cumulate:
            # Cumulating mode: extend the current entry instead of
            # starting a new one.
            self.buffer[self.ptr].append(item)
        else:
            self.ptr = (self.ptr + 1) % self.bufsize
            self.buffer[self.ptr] = item

    def pop(self):
        if self.bufsize <= 0:
            return None
        item = self.buffer[self.ptr]
        # NOTE: slots are initialised to [None] (never to None itself),
        # so this guard is effectively never taken; callers receive the
        # [None] sentinel for empty slots, which they check downstream.
        if item is None:
            return None
        self.buffer[self.ptr] = [None]
        self.ptr = (self.ptr - 1) % self.bufsize
        return item

    def nr_of_items(self):
        return self.bufsize - self.buffer.count([None])

    def __repr__(self):
        return "{} {}".format(self.buffer, self.ptr)
class TurtleScreen(TurtleScreenBase):
    """Provides screen oriented methods like setbg etc.

    Only relies upon the methods of TurtleScreenBase and NOT
    upon components of the underlying graphics toolkit -
    which is Tkinter in this case.
    """
    # Set to False to terminate a running script; checked (and re-armed)
    # in _incrementudc(), which then raises Terminator.
    _RUNNING = True

    def __init__(self, cv, mode=_CFG["mode"],
                 colormode=_CFG["colormode"], delay=_CFG["delay"]):
        self._shapes = {
            "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
            "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
                        (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
                        (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
                        (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
                        (2,14))),
            "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
                        (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
                        (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
                        (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
                        (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
                        (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
            "square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
                        (-10,-10))),
            "triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
                        (-10,-5.77))),
            "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
            "blank" : Shape("image", self._blankimage())
            }
        self._bgpics = {"nopic" : ""}
        TurtleScreenBase.__init__(self, cv)
        self._mode = mode
        self._delayvalue = delay
        # NOTE(review): the colormode parameter is ignored here; the value
        # is always taken from _CFG. Kept as is for backward compatibility.
        self._colormode = _CFG["colormode"]
        self._keys = []
        self.clear()
        if sys.platform == 'darwin':
            # Force Turtle window to the front on OS X. This is needed because
            # the Turtle window will show behind the Terminal window when you
            # start the demo from the command line.
            rootwindow = cv.winfo_toplevel()
            rootwindow.call('wm', 'attributes', '.', '-topmost', '1')
            rootwindow.call('wm', 'attributes', '.', '-topmost', '0')

    def clear(self):
        """Delete all drawings and all turtles from the TurtleScreen.

        No argument.

        Reset empty TurtleScreen to its initial state: white background,
        no backgroundimage, no eventbindings and tracing on.

        Example (for a TurtleScreen instance named screen):
        >>> screen.clear()

        Note: this method is not available as function.
        """
        self._delayvalue = _CFG["delay"]
        self._colormode = _CFG["colormode"]
        self._delete("all")
        self._bgpic = self._createimage("")
        self._bgpicname = "nopic"
        self._tracing = 1
        self._updatecounter = 0
        self._turtles = []
        self.bgcolor("white")
        for btn in 1, 2, 3:
            self.onclick(None, btn)
        self.onkeypress(None)
        # Iterate over a copy: onkey(None, key) removes key from _keys.
        for key in self._keys[:]:
            self.onkey(None, key)
            self.onkeypress(None, key)
        Turtle._pen = None

    def mode(self, mode=None):
        """Set turtle-mode ('standard', 'logo' or 'world') and perform reset.

        Optional argument:
        mode -- one of the strings 'standard', 'logo' or 'world'

        Mode 'standard' is compatible with turtle.py.
        Mode 'logo' is compatible with most Logo-Turtle-Graphics.
        Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in
        this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
        If mode is not given, return the current mode.

             Mode      Initial turtle heading     positive angles
         ------------|-------------------------|-------------------
          'standard'    to the right (east)       counterclockwise
            'logo'        upward    (north)         clockwise

        Examples:
        >>> mode('logo')   # resets turtle heading to north
        >>> mode()
        'logo'
        """
        if mode is None:
            return self._mode
        mode = mode.lower()
        if mode not in ["standard", "logo", "world"]:
            raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
        self._mode = mode
        if mode in ["standard", "logo"]:
            self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
                                   self.canvwidth//2, self.canvheight//2)
            self.xscale = self.yscale = 1.0
        self.reset()

    def setworldcoordinates(self, llx, lly, urx, ury):
        """Set up a user defined coordinate-system.

        Arguments:
        llx -- a number, x-coordinate of lower left corner of canvas
        lly -- a number, y-coordinate of lower left corner of canvas
        urx -- a number, x-coordinate of upper right corner of canvas
        ury -- a number, y-coordinate of upper right corner of canvas

        Set up user coordinate-system and switch to mode 'world' if necessary.
        This performs a screen.reset. If mode 'world' is already active,
        all drawings are redrawn according to the new coordinates.

        But ATTENTION: in user-defined coordinatesystems angles may appear
        distorted. (see Screen.mode())

        Example (for a TurtleScreen instance named screen):
        >>> screen.setworldcoordinates(-10,-0.5,50,1.5)
        >>> for _ in range(36):
        ...     left(10)
        ...     forward(0.5)
        """
        if self.mode() != "world":
            self.mode("world")
        xspan = float(urx - llx)
        yspan = float(ury - lly)
        wx, wy = self._window_size()
        self.screensize(wx-20, wy-20)
        oldxscale, oldyscale = self.xscale, self.yscale
        self.xscale = self.canvwidth / xspan
        self.yscale = self.canvheight / yspan
        srx1 = llx * self.xscale
        sry1 = -ury * self.yscale
        srx2 = self.canvwidth + srx1
        sry2 = self.canvheight + sry1
        self._setscrollregion(srx1, sry1, srx2, sry2)
        # Redraw existing drawings so they match the new scale factors.
        self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
        self.update()

    def register_shape(self, name, shape=None):
        """Adds a turtle shape to TurtleScreen's shapelist.

        Arguments:
        (1) name is the name of a gif-file and shape is None.
            Installs the corresponding image shape.
            !! Image-shapes DO NOT rotate when turning the turtle,
            !! so they do not display the heading of the turtle!
        (2) name is an arbitrary string and shape is a tuple
            of pairs of coordinates. Installs the corresponding
            polygon shape
        (3) name is an arbitrary string and shape is a
            (compound) Shape object. Installs the corresponding
            compound shape.
        To use a shape, you have to issue the command shape(shapename).

        call: register_shape("turtle.gif")
        --or: register_shape("tri", ((0,0), (10,10), (-10,10)))

        Example (for a TurtleScreen instance named screen):
        >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
        """
        if shape is None:
            # image
            if name.lower().endswith(".gif"):
                shape = Shape("image", self._image(name))
            else:
                raise TurtleGraphicsError("Bad arguments for register_shape.\n"
                                          + "Use  help(register_shape)" )
        elif isinstance(shape, tuple):
            shape = Shape("polygon", shape)
        ## else shape assumed to be Shape-instance
        self._shapes[name] = shape

    def _colorstr(self, color):
        """Return color string corresponding to args.

        Argument may be a string or a tuple of three
        numbers corresponding to actual colormode,
        i.e. in the range 0<=n<=colormode.

        If the argument doesn't represent a color,
        a TurtleGraphicsError is raised.
        """
        if len(color) == 1:
            color = color[0]
        if isinstance(color, str):
            if self._iscolorstring(color) or color == "":
                return color
            else:
                raise TurtleGraphicsError("bad color string: %s" % str(color))
        try:
            r, g, b = color
        except (TypeError, ValueError):
            # Narrowed from a bare except: only unpacking failures mean
            # the argument was not a 3-sequence of numbers.
            raise TurtleGraphicsError("bad color arguments: %s" % str(color))
        if self._colormode == 1.0:
            r, g, b = [round(255.0*x) for x in (r, g, b)]
        if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
            raise TurtleGraphicsError("bad color sequence: %s" % str(color))
        return "#%02x%02x%02x" % (r, g, b)

    def _color(self, cstr):
        """Convert a Tk color string '#rrggbb'/'#rgb' back to an r, g, b
        tuple scaled to the current colormode; pass named colors through."""
        if not cstr.startswith("#"):
            return cstr
        if len(cstr) == 7:
            cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
        elif len(cstr) == 4:
            cl = [16*int(cstr[h], 16) for h in cstr[1:]]
        else:
            raise TurtleGraphicsError("bad colorstring: %s" % cstr)
        return tuple([c * self._colormode/255 for c in cl])

    def colormode(self, cmode=None):
        """Return the colormode or set it to 1.0 or 255.

        Optional argument:
        cmode -- one of the values 1.0 or 255

        r, g, b values of colortriples have to be in range 0..cmode.

        Example (for a TurtleScreen instance named screen):
        >>> screen.colormode()
        1.0
        >>> screen.colormode(255)
        >>> pencolor(240,160,80)
        """
        if cmode is None:
            return self._colormode
        # Any other value is silently ignored (historical behavior).
        if cmode == 1.0:
            self._colormode = float(cmode)
        elif cmode == 255:
            self._colormode = int(cmode)

    def reset(self):
        """Reset all Turtles on the Screen to their initial state.

        No argument.

        Example (for a TurtleScreen instance named screen):
        >>> screen.reset()
        """
        for turtle in self._turtles:
            turtle._setmode(self._mode)
            turtle.reset()

    def turtles(self):
        """Return the list of turtles on the screen.

        Example (for a TurtleScreen instance named screen):
        >>> screen.turtles()
        [<turtle.Turtle object at 0x00E11FB0>]
        """
        return self._turtles

    def bgcolor(self, *args):
        """Set or return backgroundcolor of the TurtleScreen.

        Arguments (if given): a color string or three numbers
        in the range 0..colormode or a 3-tuple of such numbers.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bgcolor("orange")
        >>> screen.bgcolor()
        'orange'
        >>> screen.bgcolor(0.5,0,0.5)
        >>> screen.bgcolor()
        '#800080'
        """
        if args:
            color = self._colorstr(args)
        else:
            color = None
        color = self._bgcolor(color)
        if color is not None:
            color = self._color(color)
        return color

    def tracer(self, n=None, delay=None):
        """Turns turtle animation on/off and set delay for update drawings.

        Optional arguments:
        n -- nonnegative  integer
        delay -- nonnegative  integer

        If n is given, only each n-th regular screen update is really performed.
        (Can be used to accelerate the drawing of complex graphics.)
        Second arguments sets delay value (see RawTurtle.delay())

        Example (for a TurtleScreen instance named screen):
        >>> screen.tracer(8, 25)
        >>> dist = 2
        >>> for i in range(200):
        ...     fd(dist)
        ...     rt(90)
        ...     dist += 2
        """
        if n is None:
            return self._tracing
        self._tracing = int(n)
        self._updatecounter = 0
        if delay is not None:
            self._delayvalue = int(delay)
        if self._tracing:
            self.update()

    def delay(self, delay=None):
        """ Return or set the drawing delay in milliseconds.

        Optional argument:
        delay -- positive integer

        Example (for a TurtleScreen instance named screen):
        >>> screen.delay(15)
        >>> screen.delay()
        15
        """
        if delay is None:
            return self._delayvalue
        self._delayvalue = int(delay)

    def _incrementudc(self):
        """Increment update counter."""
        if not TurtleScreen._RUNNING:
            # Re-arm the flag before terminating so a subsequent script
            # run starts cleanly.  (Bug fix: this previously assigned to
            # a misspelled attribute '_RUNNNING', leaving _RUNNING False.)
            TurtleScreen._RUNNING = True
            raise Terminator
        if self._tracing > 0:
            self._updatecounter += 1
            self._updatecounter %= self._tracing

    def update(self):
        """Perform a TurtleScreen update.
        """
        # Temporarily force tracing on so every turtle really redraws.
        tracing = self._tracing
        self._tracing = True
        for t in self.turtles():
            t._update_data()
            t._drawturtle()
        self._tracing = tracing
        self._update()

    def window_width(self):
        """ Return the width of the turtle window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.window_width()
        640
        """
        return self._window_size()[0]

    def window_height(self):
        """ Return the height of the turtle window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.window_height()
        480
        """
        return self._window_size()[1]

    def getcanvas(self):
        """Return the Canvas of this TurtleScreen.

        No argument.

        Example (for a Screen instance named screen):
        >>> cv = screen.getcanvas()
        >>> cv
        <turtle.ScrolledCanvas instance at 0x010742D8>
        """
        return self.cv

    def getshapes(self):
        """Return a list of names of all currently available turtle shapes.

        No argument.

        Example (for a TurtleScreen instance named screen):
        >>> screen.getshapes()
        ['arrow', 'blank', 'circle', ... , 'turtle']
        """
        return sorted(self._shapes.keys())

    def onclick(self, fun, btn=1, add=None):
        """Bind fun to mouse-click event on canvas.

        Arguments:
        fun -- a function with two arguments, the coordinates of the
               clicked point on the canvas.
        btn -- the number of the mouse-button, defaults to 1

        Example (for a TurtleScreen instance named screen)

        >>> screen.onclick(goto)
        >>> # Subsequently clicking into the TurtleScreen will
        >>> # make the turtle move to the clicked point.
        >>> screen.onclick(None)
        """
        self._onscreenclick(fun, btn, add)

    def onkey(self, fun, key):
        """Bind fun to key-release event of key.

        Arguments:
        fun -- a function with no arguments
        key -- a string: key (e.g. "a") or key-symbol (e.g. "space")

        In order to be able to register key-events, TurtleScreen
        must have focus. (See method listen.)

        Example (for a TurtleScreen instance named screen):

        >>> def f():
        ...     fd(50)
        ...     lt(60)
        ...
        >>> screen.onkey(f, "Up")
        >>> screen.listen()

        Subsequently the turtle can be moved by repeatedly pressing
        the up-arrow key, consequently drawing a hexagon
        """
        # Track bound keys so clear() can unbind them again.
        if fun is None:
            if key in self._keys:
                self._keys.remove(key)
        elif key not in self._keys:
            self._keys.append(key)
        self._onkeyrelease(fun, key)

    def onkeypress(self, fun, key=None):
        """Bind fun to key-press event of key if key is given,
        or to any key-press-event if no key is given.

        Arguments:
        fun -- a function with no arguments
        key -- a string: key (e.g. "a") or key-symbol (e.g. "space")

        In order to be able to register key-events, TurtleScreen
        must have focus. (See method listen.)

        Example (for a TurtleScreen instance named screen
        and a Turtle instance named turtle):

        >>> def f():
        ...     fd(50)
        ...     lt(60)
        ...
        >>> screen.onkeypress(f, "Up")
        >>> screen.listen()

        Subsequently the turtle can be moved by repeatedly pressing
        the up-arrow key, or by keeping pressed the up-arrow key.
        consequently drawing a hexagon.
        """
        if fun is None:
            if key in self._keys:
                self._keys.remove(key)
        elif key is not None and key not in self._keys:
            self._keys.append(key)
        self._onkeypress(fun, key)

    def listen(self, xdummy=None, ydummy=None):
        """Set focus on TurtleScreen (in order to collect key-events)

        No arguments.
        Dummy arguments are provided in order
        to be able to pass listen to the onclick method.

        Example (for a TurtleScreen instance named screen):
        >>> screen.listen()
        """
        self._listen()

    def ontimer(self, fun, t=0):
        """Install a timer, which calls fun after t milliseconds.

        Arguments:
        fun -- a function with no arguments.
        t -- a number >= 0

        Example (for a TurtleScreen instance named screen):

        >>> running = True
        >>> def f():
        ...     if running:
        ...             fd(50)
        ...             lt(60)
        ...             screen.ontimer(f, 250)
        ...
        >>> f()   # makes the turtle marching around
        >>> running = False
        """
        self._ontimer(fun, t)

    def bgpic(self, picname=None):
        """Set background image or return name of current backgroundimage.

        Optional argument:
        picname -- a string, name of a gif-file or "nopic".

        If picname is a filename, set the corresponding image as background.
        If picname is "nopic", delete backgroundimage, if present.
        If picname is None, return the filename of the current backgroundimage.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bgpic()
        'nopic'
        >>> screen.bgpic("landscape.gif")
        >>> screen.bgpic()
        'landscape.gif'
        """
        if picname is None:
            return self._bgpicname
        if picname not in self._bgpics:
            # Cache loaded images so repeated use doesn't reload the file.
            self._bgpics[picname] = self._image(picname)
        self._setbgpic(self._bgpic, self._bgpics[picname])
        self._bgpicname = picname

    def screensize(self, canvwidth=None, canvheight=None, bg=None):
        """Resize the canvas the turtles are drawing on.

        Optional arguments:
        canvwidth -- positive integer, new width of canvas in pixels
        canvheight --  positive integer, new height of canvas in pixels
        bg -- colorstring or color-tuple, new backgroundcolor
        If no arguments are given, return current (canvaswidth, canvasheight)

        Do not alter the drawing window. To observe hidden parts of
        the canvas use the scrollbars. (Can make visible those parts
        of a drawing, which were outside the canvas before!)

        Example (for a Turtle instance named turtle):
        >>> turtle.screensize(2000,1500)
        >>> # e.g. to search for an erroneously escaped turtle ;-)
        """
        return self._resize(canvwidth, canvheight, bg)

    onscreenclick = onclick
    resetscreen = reset
    clearscreen = clear
    addshape = register_shape
    onkeyrelease = onkey
class TNavigator(object):
    """Navigation part of the RawTurtle.
    Implements methods for turtle movement.
    """
    # Unit heading vector the turtle starts with, per mode.
    START_ORIENTATION = {
        "standard": Vec2D(1.0, 0.0),
        "world"   : Vec2D(1.0, 0.0),
        "logo"    : Vec2D(0.0, 1.0) }
    DEFAULT_MODE = "standard"
    DEFAULT_ANGLEOFFSET = 0
    DEFAULT_ANGLEORIENT = 1

    def __init__(self, mode=DEFAULT_MODE):
        self._angleOffset = self.DEFAULT_ANGLEOFFSET
        self._angleOrient = self.DEFAULT_ANGLEORIENT
        self._mode = mode
        self.undobuffer = None
        self.degrees()
        self._mode = None
        self._setmode(mode)
        TNavigator.reset(self)

    def reset(self):
        """reset turtle to its initial values

        Will be overwritten by parent class
        """
        self._position = Vec2D(0.0, 0.0)
        self._orient =  TNavigator.START_ORIENTATION[self._mode]

    def _setmode(self, mode=None):
        """Set turtle-mode to 'standard', 'world' or 'logo'.

        With no argument, return the current mode; unknown modes are
        silently ignored.
        """
        if mode is None:
            return self._mode
        if mode not in ["standard", "logo", "world"]:
            return
        self._mode = mode
        if mode in ["standard", "world"]:
            self._angleOffset = 0
            self._angleOrient = 1
        else: # mode == "logo":
            # logo mode: zero heading points north, angles run clockwise.
            self._angleOffset = self._fullcircle/4.
            self._angleOrient = -1

    def _setDegreesPerAU(self, fullcircle):
        """Helper function for degrees() and radians()"""
        self._fullcircle = fullcircle
        self._degreesPerAU = 360/fullcircle
        if self._mode == "standard":
            self._angleOffset = 0
        else:
            self._angleOffset = fullcircle/4.

    def degrees(self, fullcircle=360.0):
        """ Set angle measurement units to degrees.

        Optional argument:
        fullcircle -  a number

        Set angle measurement units, i. e. set number
        of 'degrees' for a full circle. Default value is
        360 degrees.

        Example (for a Turtle instance named turtle):
        >>> turtle.left(90)
        >>> turtle.heading()
        90

        Change angle measurement unit to grad (also known as gon,
        grade, or gradian and equals 1/100-th of the right angle.)
        >>> turtle.degrees(400.0)
        >>> turtle.heading()
        100
        """
        self._setDegreesPerAU(fullcircle)

    def radians(self):
        """ Set the angle measurement units to radians.

        No arguments.

        Example (for a Turtle instance named turtle):
        >>> turtle.heading()
        90
        >>> turtle.radians()
        >>> turtle.heading()
        1.5707963267948966
        """
        self._setDegreesPerAU(2*math.pi)

    def _go(self, distance):
        """move turtle forward by specified distance"""
        ende = self._position + self._orient * distance
        self._goto(ende)

    def _rotate(self, angle):
        """Turn turtle counterclockwise by specified angle if angle > 0."""
        # Convert from the user's angle units to degrees.
        angle *= self._degreesPerAU
        self._orient = self._orient.rotate(angle)

    def _goto(self, end):
        """move turtle to position end."""
        self._position = end

    def forward(self, distance):
        """Move the turtle forward by the specified distance.

        Aliases: forward | fd

        Argument:
        distance -- a number (integer or float)

        Move the turtle forward by the specified distance, in the direction
        the turtle is headed.

        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00, 0.00)
        >>> turtle.forward(25)
        >>> turtle.position()
        (25.00,0.00)
        >>> turtle.forward(-75)
        >>> turtle.position()
        (-50.00,0.00)
        """
        self._go(distance)

    def back(self, distance):
        """Move the turtle backward by distance.

        Aliases: back | backward | bk

        Argument:
        distance -- a number

        Move the turtle backward by distance, opposite to the direction the
        turtle is headed. Do not change the turtle's heading.

        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00, 0.00)
        >>> turtle.backward(30)
        >>> turtle.position()
        (-30.00, 0.00)
        """
        self._go(-distance)

    def right(self, angle):
        """Turn turtle right by angle units.

        Aliases: right | rt

        Argument:
        angle -- a number (integer or float)

        Turn turtle right by angle units. (Units are by default degrees,
        but can be set via the degrees() and radians() functions.)
        Angle orientation depends on mode. (See this.)

        Example (for a Turtle instance named turtle):
        >>> turtle.heading()
        22.0
        >>> turtle.right(45)
        >>> turtle.heading()
        337.0
        """
        self._rotate(-angle)

    def left(self, angle):
        """Turn turtle left by angle units.

        Aliases: left | lt

        Argument:
        angle -- a number (integer or float)

        Turn turtle left by angle units. (Units are by default degrees,
        but can be set via the degrees() and radians() functions.)
        Angle orientation depends on mode. (See this.)

        Example (for a Turtle instance named turtle):
        >>> turtle.heading()
        22.0
        >>> turtle.left(45)
        >>> turtle.heading()
        67.0
        """
        self._rotate(angle)

    def pos(self):
        """Return the turtle's current location (x,y), as a Vec2D-vector.

        Aliases: pos | position

        No arguments.

        Example (for a Turtle instance named turtle):
        >>> turtle.pos()
        (0.00, 240.00)
        """
        return self._position

    def xcor(self):
        """ Return the turtle's x coordinate.

        No arguments.

        Example (for a Turtle instance named turtle):
        >>> reset()
        >>> turtle.left(60)
        >>> turtle.forward(100)
        >>> print(turtle.xcor())
        50.0
        """
        return self._position[0]

    def ycor(self):
        """ Return the turtle's y coordinate
        ---
        No arguments.

        Example (for a Turtle instance named turtle):
        >>> reset()
        >>> turtle.left(60)
        >>> turtle.forward(100)
        >>> print(turtle.ycor())
        86.6025403784
        """
        return self._position[1]

    def goto(self, x, y=None):
        """Move turtle to an absolute position.

        Aliases: setpos | setposition | goto:

        Arguments:
        x -- a number      or     a pair/vector of numbers
        y -- a number             or     None (if x is a pair/vector)

        call: goto(x, y)         # two coordinates
        --or: goto((x, y))       # a pair (tuple) of coordinates
        --or: goto(vec)          # e.g. as returned by pos()

        Move turtle to an absolute position. If the pen is down,
        a line will be drawn. The turtle's orientation does not change.

        Example (for a Turtle instance named turtle):
        >>> tp = turtle.pos()
        >>> tp
        (0.00, 0.00)
        >>> turtle.setpos(60,30)
        >>> turtle.pos()
        (60.00,30.00)
        >>> turtle.setpos((20,80))
        >>> turtle.pos()
        (20.00,80.00)
        >>> turtle.setpos(tp)
        >>> turtle.pos()
        (0.00,0.00)
        """
        if y is None:
            self._goto(Vec2D(*x))
        else:
            self._goto(Vec2D(x, y))

    def home(self):
        """Move turtle to the origin - coordinates (0,0).

        No arguments.

        Move turtle to the origin - coordinates (0,0) and set its
        heading to its start-orientation (which depends on mode).

        Example (for a Turtle instance named turtle):
        >>> turtle.home()
        """
        self.goto(0, 0)
        self.setheading(0)

    def setx(self, x):
        """Set the turtle's first coordinate to x

        Argument:
        x -- a number (integer or float)

        Set the turtle's first coordinate to x, leave second coordinate
        unchanged.

        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00, 240.00)
        >>> turtle.setx(10)
        >>> turtle.position()
        (10.00, 240.00)
        """
        self._goto(Vec2D(x, self._position[1]))

    def sety(self, y):
        """Set the turtle's second coordinate to y

        Argument:
        y -- a number (integer or float)

        Set the turtle's second coordinate to y, first coordinate remains
        unchanged.

        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00, 40.00)
        >>> turtle.sety(-10)
        >>> turtle.position()
        (0.00, -10.00)
        """
        self._goto(Vec2D(self._position[0], y))

    def distance(self, x, y=None):
        """Return the distance from the turtle to (x,y) in turtle step units.

        Arguments:
        x -- a number   or  a pair/vector of numbers   or   a turtle instance
        y -- a number       or None (if x is not a plain number)

        call: distance(x, y)         # two coordinates
        --or: distance((x, y))       # a pair (tuple) of coordinates
        --or: distance(vec)          # e.g. as returned by pos()
        --or: distance(mypen)        # where mypen is another turtle

        Example (for a Turtle instance named turtle):
        >>> turtle.pos()
        (0.00, 0.00)
        >>> turtle.distance(30,40)
        50.0
        >>> pen = Turtle()
        >>> pen.forward(77)
        >>> turtle.distance(pen)
        77.0
        """
        # Normalize the argument to a Vec2D position.
        if y is not None:
            pos = Vec2D(x, y)
        if isinstance(x, Vec2D):
            pos = x
        elif isinstance(x, tuple):
            pos = Vec2D(*x)
        elif isinstance(x, TNavigator):
            pos = x._position
        return abs(pos - self._position)

    def towards(self, x, y=None):
        """Return the angle of the line from the turtle's position to (x, y).

        Arguments:
        x -- a number   or  a pair/vector of numbers   or   a turtle instance
        y -- a number       or None (if x is not a plain number)

        call: towards(x, y)         # two coordinates
        --or: towards((x, y))       # a pair (tuple) of coordinates
        --or: towards(vec)          # e.g. as returned by pos()
        --or: towards(mypen)        # where mypen is another turtle

        Return the angle, between the line from turtle-position to position
        specified by x, y and the turtle's start orientation. (Depends on
        modes - "standard" or "logo")

        Example (for a Turtle instance named turtle):
        >>> turtle.pos()
        (10.00, 10.00)
        >>> turtle.towards(0,0)
        225.0
        """
        # Normalize the argument to a Vec2D position (same as distance()).
        if y is not None:
            pos = Vec2D(x, y)
        if isinstance(x, Vec2D):
            pos = x
        elif isinstance(x, tuple):
            pos = Vec2D(*x)
        elif isinstance(x, TNavigator):
            pos = x._position
        x, y = pos - self._position
        result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
        # Convert degrees to the current angle units and apply the
        # mode-dependent offset/orientation.
        result /= self._degreesPerAU
        return (self._angleOffset + self._angleOrient*result) % self._fullcircle

    def heading(self):
        """ Return the turtle's current heading.

        No arguments.

        Example (for a Turtle instance named turtle):
        >>> turtle.left(67)
        >>> turtle.heading()
        67.0
        """
        x, y = self._orient
        result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
        result /= self._degreesPerAU
        return (self._angleOffset + self._angleOrient*result) % self._fullcircle

    def setheading(self, to_angle):
        """Set the orientation of the turtle to to_angle.

        Aliases:  setheading | seth

        Argument:
        to_angle -- a number (integer or float)

        Set the orientation of the turtle to to_angle.
        Here are some common directions in degrees:

         standard - mode:          logo-mode:
        -------------------|--------------------
           0 - east                0 - north
          90 - north              90 - east
         180 - west              180 - south
         270 - south             270 - west

        Example (for a Turtle instance named turtle):
        >>> turtle.setheading(90)
        >>> turtle.heading()
        90
        """
        angle = (to_angle - self.heading())*self._angleOrient
        full = self._fullcircle
        # Normalize to [-full/2, full/2) so the turtle turns the short way.
        angle = (angle+full/2.)%full - full/2.
        self._rotate(angle)

    def circle(self, radius, extent = None, steps = None):
        """ Draw a circle with given radius.

        Arguments:
        radius -- a number
        extent (optional) -- a number
        steps (optional) -- an integer

        Draw a circle with given radius. The center is radius units left
        of the turtle; extent - an angle - determines which part of the
        circle is drawn. If extent is not given, draw the entire circle.
        If extent is not a full circle, one endpoint of the arc is the
        current pen position. Draw the arc in counterclockwise direction
        if radius is positive, otherwise in clockwise direction. Finally
        the direction of the turtle is changed by the amount of extent.

        As the circle is approximated by an inscribed regular polygon,
        steps determines the number of steps to use. If not given,
        it will be calculated automatically. Maybe used to draw regular
        polygons.

        call: circle(radius)                  # full circle
        --or: circle(radius, extent)          # arc
        --or: circle(radius, extent, steps)
        --or: circle(radius, steps=6)         # 6-sided polygon

        Example (for a Turtle instance named turtle):
        >>> turtle.circle(50)
        >>> turtle.circle(120, 180)  # semicircle
        """
        if self.undobuffer:
            # Record the whole arc as one cumulative undo entry.
            self.undobuffer.push(["seq"])
            self.undobuffer.cumulate = True
        speed = self.speed()
        if extent is None:
            extent = self._fullcircle
        if steps is None:
            # Heuristic: more segments for larger radii, capped at 59+1.
            frac = abs(extent)/self._fullcircle
            steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
        w = 1.0 * extent / steps
        w2 = 0.5 * w
        # Chord length of one polygon segment inscribed in the circle.
        l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
        if radius < 0:
            l, w, w2 = -l, -w, -w2
        tr = self._tracer()
        dl = self._delay()
        if speed == 0:
            self._tracer(0, 0)
        else:
            self.speed(0)
        self._rotate(w2)
        for i in range(steps):
            self.speed(speed)
            self._go(l)
            self.speed(0)
            self._rotate(w)
        self._rotate(-w2)
        if speed == 0:
            self._tracer(tr, dl)
        self.speed(speed)
        if self.undobuffer:
            self.undobuffer.cumulate = False

## three dummy methods to be implemented by child class:

    def speed(self, s=0):
        """dummy method - to be overwritten by child class"""
    def _tracer(self, a=None, b=None):
        """dummy method - to be overwritten by child class"""
    def _delay(self, n=None):
        """dummy method - to be overwritten by child class"""

    fd = forward
    bk = back
    backward = back
    rt = right
    lt = left
    position = pos
    setpos = goto
    setposition = goto
    seth = setheading
class TPen(object):
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
    # resizemode is one of "auto", "user" or "noresize" (see resizemode()).
    self._resizemode = resizemode # or "user" or "noresize"
    self.undobuffer = None
    # Initialize all pen attributes to their defaults.
    TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
           fillcolor=_CFG["fillcolor"]):
    """Restore all pen attributes to their configured defaults."""
    self._pensize = 1                    # line thickness
    self._shown = True                   # turtle visible
    self._pencolor = pencolor
    self._fillcolor = fillcolor
    self._drawing = True                 # pen is down
    self._speed = 3                      # animation speed (0..10)
    self._stretchfactor = (1., 1.)       # shape stretch (perpendicular, parallel)
    self._shearfactor = 0.
    self._tilt = 0.
    self._shapetrafo = (1., 0., 0., 1.)  # 2x2 shape transformation matrix
    self._outlinewidth = 1
def resizemode(self, rmode=None):
    """Set resizemode to one of the values: "auto", "user", "noresize".

    (Optional) Argument:
    rmode -- one of the strings "auto", "user", "noresize"

    Different resizemodes have the following effects:
      - "auto" adapts the appearance of the turtle
               corresponding to the value of pensize.
      - "user" adapts the appearance of the turtle according to the
               values of stretchfactor and outlinewidth (outline),
               which are set by shapesize()
      - "noresize" no adaption of the turtle's appearance takes place.
    If no argument is given, return current resizemode.
    resizemode("user") is called by a call of shapesize with arguments.

    Examples (for a Turtle instance named turtle):
    >>> turtle.resizemode("noresize")
    >>> turtle.resizemode()
    'noresize'
    """
    if rmode is None:
        return self._resizemode
    normalized = rmode.lower()
    # Unknown modes are ignored silently.
    if normalized in ("auto", "user", "noresize"):
        self.pen(resizemode=normalized)
def pensize(self, width=None):
    """Set or return the line thickness.

    Aliases:  pensize | width

    Argument:
    width -- positive number

    Set the line thickness to width or return it. If resizemode is set
    to "auto" and turtleshape is a polygon, that polygon is drawn with
    the same line thickness. If no argument is given, current pensize
    is returned.

    Example (for a Turtle instance named turtle):
    >>> turtle.pensize()
    1
    >>> turtle.pensize(10)   # from here on lines of width 10 are drawn
    """
    if width is not None:
        self.pen(pensize=width)
        return
    return self._pensize
def penup(self):
    """Pull the pen up -- no drawing when moving.

    Aliases: penup | pu | up

    No argument

    Example (for a Turtle instance named turtle):
    >>> turtle.penup()
    """
    # Only act when the pen is currently down; avoids a redundant
    # pen() call (and undo entry) otherwise.
    if self._drawing:
        self.pen(pendown=False)
def pendown(self):
    """Pull the pen down -- drawing when moving.

    Aliases: pendown | pd | down

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.pendown()
    """
    # Only act when the pen is currently up; avoids a redundant
    # pen() call (and undo entry) otherwise.
    if not self._drawing:
        self.pen(pendown=True)
def isdown(self):
    """Return True if pen is down, False if it's up.

    No argument.

    Example (for a Turtle instance named turtle):
    >>> turtle.penup()
    >>> turtle.isdown()
    False
    >>> turtle.pendown()
    >>> turtle.isdown()
    True
    """
    # _drawing already holds the pen state as a bool.
    return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor, set both, fillcolor and pencolor,
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
is drawn with the newly set colors.
For mor info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represent, an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
    def showturtle(self):
        """Makes the turtle visible.

        Aliases: showturtle | st

        No argument.

        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        >>> turtle.showturtle()
        """
        # Visibility is part of the pen state, so the change is undoable.
        self.pen(shown=True)
    def hideturtle(self):
        """Makes the turtle invisible.

        Aliases: hideturtle | ht

        No argument.

        It's a good idea to do this while you're in the
        middle of a complicated drawing, because hiding
        the turtle speeds up the drawing observably.

        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        """
        # Visibility is part of the pen state, so the change is undoable.
        self.pen(shown=False)
    def isvisible(self):
        """Return True if the Turtle is shown, False if it's hidden.

        No argument.

        Example (for a Turtle instance named turtle):
        >>> turtle.hideturtle()
        >>> print(turtle.isvisible())
        False
        """
        # _shown is maintained by pen(shown=...).
        return self._shown
    def pen(self, pen=None, **pendict):
        """Return or set the pen's attributes.

        Arguments:
            pen -- a dictionary with some or all of the below listed keys.
            **pendict -- one or more keyword-arguments with the below
                         listed keys as keywords.

        Return or set the pen's attributes in a 'pen-dictionary'
        with the following key/value pairs:
           "shown"      :   True/False
           "pendown"    :   True/False
           "pencolor"   :   color-string or color-tuple
           "fillcolor"  :   color-string or color-tuple
           "pensize"    :   positive number
           "speed"      :   number in range 0..10
           "resizemode" :   "auto" or "user" or "noresize"
           "stretchfactor": (positive number, positive number)
           "shearfactor":   number
           "outline"    :   positive number
           "tilt"       :   number

        This dictionary can be used as argument for a subsequent
        pen()-call to restore the former pen-state. Moreover one
        or more of these attributes can be provided as keyword-arguments.
        This can be used to set several pen attributes in one statement.

        Examples (for a Turtle instance named turtle):
        >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
        >>> turtle.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        >>> penstate=turtle.pen()
        >>> turtle.color("yellow","")
        >>> turtle.penup()
        >>> turtle.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        >>> turtle.pen(penstate, fillcolor="green")
        >>> turtle.pen()
        {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
        'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
        'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
        """
        # Snapshot of the current state; returned for a pure query and
        # used below to record undo information.
        _pd = {"shown"          : self._shown,
               "pendown"        : self._drawing,
               "pencolor"       : self._pencolor,
               "fillcolor"      : self._fillcolor,
               "pensize"        : self._pensize,
               "speed"          : self._speed,
               "resizemode"     : self._resizemode,
               "stretchfactor"  : self._stretchfactor,
               "shearfactor"    : self._shearfactor,
               "outline"        : self._outlinewidth,
               "tilt"           : self._tilt
              }
        if not (pen or pendict):
            return _pd
        # Merge the positional dictionary with keyword arguments
        # (keyword arguments take precedence).
        if isinstance(pen, dict):
            p = pen
        else:
            p = {}
        p.update(pendict)
        # Record the previous values of exactly the keys being changed,
        # so undo() can restore them.
        _p_buf = {}
        for key in p:
            _p_buf[key] = _pd[key]
        if self.undobuffer:
            self.undobuffer.push(("pen", _p_buf))
        # A change of pendown state, pencolor or pensize means the current
        # canvas line item cannot be continued; start a new one.
        newLine = False
        if "pendown" in p:
            if self._drawing != p["pendown"]:
                newLine = True
        if "pencolor" in p:
            if isinstance(p["pencolor"], tuple):
                p["pencolor"] = self._colorstr((p["pencolor"],))
            if self._pencolor != p["pencolor"]:
                newLine = True
        if "pensize" in p:
            if self._pensize != p["pensize"]:
                newLine = True
        if newLine:
            self._newLine()
        # Apply the new attribute values.
        if "pendown" in p:
            self._drawing = p["pendown"]
        if "pencolor" in p:
            self._pencolor = p["pencolor"]
        if "pensize" in p:
            self._pensize = p["pensize"]
        if "fillcolor" in p:
            if isinstance(p["fillcolor"], tuple):
                p["fillcolor"] = self._colorstr((p["fillcolor"],))
            self._fillcolor = p["fillcolor"]
        if "speed" in p:
            self._speed = p["speed"]
        if "resizemode" in p:
            self._resizemode = p["resizemode"]
        if "stretchfactor" in p:
            sf = p["stretchfactor"]
            # A single number stretches uniformly in both directions.
            if isinstance(sf, (int, float)):
                sf = (sf, sf)
            self._stretchfactor = sf
        if "shearfactor" in p:
            self._shearfactor = p["shearfactor"]
        if "outline" in p:
            self._outlinewidth = p["outline"]
        if "shown" in p:
            self._shown = p["shown"]
        if "tilt" in p:
            self._tilt = p["tilt"]
        # Recompute the cached shape transformation matrix whenever one of
        # the geometric attributes changed.
        if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
            scx, scy = self._stretchfactor
            shf = self._shearfactor
            sa, ca = math.sin(self._tilt), math.cos(self._tilt)
            self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
                                -scx*sa, scy*(ca - shf*sa))
        self._update()
    ## three dummy methods to be implemented by child class:

    def _newLine(self, usePos = True):
        """dummy method - to be overwritten by child class"""
        # Intentionally a no-op at this level; RawTurtle overrides it to
        # close the current canvas line item and start a new one.

    def _update(self, count=True, forced=False):
        """dummy method - to be overwritten by child class"""
        # Intentionally a no-op at this level; RawTurtle overrides it to
        # redraw the turtle and refresh the screen.

    def _color(self, args):
        """dummy method - to be overwritten by child class"""

    def _colorstr(self, args):
        """dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage(object):
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
screen = self.screen
self.shapeIndex = shapeIndex
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
class RawTurtle(TPen, TNavigator):
    """Animation part of the RawTurtle.
    Puts RawTurtle upon a TurtleScreen and provides tools for
    its animation.
    """
    # Class-level registry of TurtleScreen instances created so far, so
    # that several turtles constructed on the same canvas share a screen.
    screens = []
    def __init__(self, canvas=None,
                 shape=_CFG["shape"],
                 undobuffersize=_CFG["undobuffersize"],
                 visible=_CFG["visible"]):
        """Create a turtle on the given canvas/screen.

        canvas may be a _Screen, a TurtleScreen, a ScrolledCanvas or a
        plain Canvas; anything else raises TurtleGraphicsError.
        """
        # Normalize the different kinds of canvas argument to a
        # TurtleScreen stored in self.screen.
        if isinstance(canvas, _Screen):
            self.screen = canvas
        elif isinstance(canvas, TurtleScreen):
            if canvas not in RawTurtle.screens:
                RawTurtle.screens.append(canvas)
            self.screen = canvas
        elif isinstance(canvas, (ScrolledCanvas, Canvas)):
            # Reuse an already wrapped canvas if possible, else wrap it
            # in a new TurtleScreen and remember it.
            for screen in RawTurtle.screens:
                if screen.cv == canvas:
                    self.screen = screen
                    break
            else:
                self.screen = TurtleScreen(canvas)
                RawTurtle.screens.append(self.screen)
        else:
            raise TurtleGraphicsError("bad canvas argument %s" % canvas)
        screen = self.screen
        # Initialize the navigation and pen parts of the turtle.
        TNavigator.__init__(self, screen.mode())
        TPen.__init__(self)
        screen._turtles.append(self)
        # Canvas item used for the animated segment while moving.
        self.drawingLineItem = screen._createline()
        self.turtle = _TurtleImage(screen, shape)
        self._poly = None
        self._creatingPoly = False
        self._fillitem = self._fillpath = None
        self._shown = visible
        self._hidden_from_screen = False
        # The polyline currently being drawn and all items owned so far.
        self.currentLineItem = screen._createline()
        self.currentLine = [self._position]
        self.items = [self.currentLineItem]
        self.stampItems = []
        self._undobuffersize = undobuffersize
        self.undobuffer = Tbuffer(undobuffersize)
        self._update()
    def reset(self):
        """Delete the turtle's drawings and restore its default values.

        No argument.

        Delete the turtle's drawings from the screen, re-center the turtle
        and set variables to the default values.

        Example (for a Turtle instance named turtle):
        >>> turtle.position()
        (0.00,-22.00)
        >>> turtle.heading()
        100.0
        >>> turtle.reset()
        >>> turtle.position()
        (0.00,0.00)
        >>> turtle.heading()
        0.0
        """
        # Reset both base classes, then wipe the drawings and redraw.
        TNavigator.reset(self)
        TPen._reset(self)
        self._clear()
        self._drawturtle()
        self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None or size <= 0:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
    def _clear(self):
        """Delete all of pen's drawings"""
        # Abandon any fill in progress.
        self._fillitem = self._fillpath = None
        # Remove every canvas item this turtle has created so far.
        for item in self.items:
            self.screen._delete(item)
        self.currentLineItem = self.screen._createline()
        self.currentLine = []
        if self._drawing:
            # The new current line starts at the present position.
            self.currentLine.append(self._position)
        self.items = [self.currentLineItem]
        self.clearstamps()
        # The undo history refers to the deleted items; reset it as well.
        self.setundobuffer(self._undobuffersize)
    def clear(self):
        """Delete the turtle's drawings from the screen. Do not move turtle.

        No arguments.

        Delete the turtle's drawings from the screen. Do not move turtle.
        State and position of the turtle as well as drawings of other
        turtles are not affected.

        Examples (for a Turtle instance named turtle):
        >>> turtle.clear()
        """
        self._clear()
        # Redraw immediately so the deletion becomes visible.
        self._update()
def _update_data(self):
self.screen._incrementudc()
if self.screen._updatecounter != 0:
return
if len(self.currentLine)>1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
    def _update(self):
        """Perform a Turtle-data update.
        """
        screen = self.screen
        if screen._tracing == 0:
            # Animation off: drawing is deferred until the screen updates.
            return
        elif screen._tracing == 1:
            # Normal tracing: flush line data, redraw this turtle, refresh
            # the screen and honor the configured delay.
            self._update_data()
            self._drawturtle()
            screen._update()                  # TurtleScreenBase
            screen._delay(screen._delayvalue) # TurtleScreenBase
        else:
            # Batched tracing (_tracing == n > 1): _update_data counts the
            # updates; only every n-th one (counter back at 0) is drawn,
            # and then all turtles are redrawn.
            self._update_data()
            if screen._updatecounter == 0:
                for t in screen.turtles():
                    t._drawturtle()
                screen._update()
    def _tracer(self, flag=None, delay=None):
        """Turns turtle animation on/off and set delay for update drawings.

        Optional arguments:
        flag -- nonnegative integer (called n in the description below)
        delay -- nonnegative integer

        If n is given, only each n-th regular screen update is really performed.
        (Can be used to accelerate the drawing of complex graphics.)
        Second arguments sets delay value (see RawTurtle.delay())

        Example (for a Turtle instance named turtle):
        >>> turtle.tracer(8, 25)
        >>> dist = 2
        >>> for i in range(200):
        ...     turtle.fd(dist)
        ...     turtle.rt(90)
        ...     dist += 2
        """
        # Tracing is a screen-wide setting; simply delegate.
        return self.screen.tracer(flag, delay)
    def _color(self, args):
        # Resolve a stored color value to its external representation
        # via the screen (implements the TPen hook).
        return self.screen._color(args)

    def _colorstr(self, args):
        # Convert user color arguments to a Tk color string via the
        # screen (implements the TPen hook).
        return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
    def clone(self):
        """Create and return a clone of the turtle.

        No argument.

        Create and return a clone of the turtle with same position, heading
        and turtle properties.

        Example (for a Turtle instance named mick):
        mick = Turtle()
        joe = mick.clone()
        """
        screen = self.screen
        self._newLine(self._drawing)
        turtle = self.turtle
        # Temporarily detach the members that cannot be deep-copied.
        self.screen = None
        self.turtle = None  # to make self deepcopy-able
        q = deepcopy(self)
        self.screen = screen
        self.turtle = turtle
        # The clone lives on the same screen but needs its own image and
        # canvas items.
        q.screen = screen
        q.turtle = _TurtleImage(screen, self.turtle.shapeIndex)
        screen._turtles.append(q)
        ttype = screen._shapes[self.turtle.shapeIndex]._type
        if ttype == "polygon":
            q.turtle._item = screen._createpoly()
        elif ttype == "image":
            q.turtle._item = screen._createimage(screen._shapes["blank"]._data)
        elif ttype == "compound":
            q.turtle._item = [screen._createpoly() for item in
                              screen._shapes[self.turtle.shapeIndex]._data]
        q.currentLineItem = screen._createline()
        q._update()
        return q
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if not name in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of turtles orientation.
outline determines the width of the shapes's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
>>> 0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
    def tilt(self, angle):
        """Rotate the turtleshape by angle.

        Argument:
        angle - a number

        Rotate the turtleshape by angle from its current tilt-angle,
        but do NOT change the turtle's heading (direction of movement).

        Examples (for a Turtle instance named turtle):
        >>> turtle.shape("circle")
        >>> turtle.shapesize(5,2)
        >>> turtle.tilt(30)
        >>> turtle.fd(50)
        >>> turtle.tilt(30)
        >>> turtle.fd(50)
        """
        # Relative rotation: add the offset to the current tilt-angle.
        self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, 22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
if t11 * t22 - t12 * t21 == 0:
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self.pen(resizemode="user")
    def _polytrafo(self, poly):
        """Computes transformed polygon shapes from a shape
        according to current position and heading.
        """
        screen = self.screen
        p0, p1 = self._position
        e0, e1 = self._orient
        # Correct the heading vector for anisotropic screen scaling, then
        # normalize it to unit length.
        e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
        e0, e1 = (1.0 / abs(e)) * e
        # Rotate each vertex into the heading frame and translate it to
        # the turtle's position, converting via the screen scale factors.
        return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
                for (x, y) in poly]
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
    def _drawturtle(self):
        """Manages the correct rendering of the turtle with respect to
        its shape, resizemode, stretch and tilt etc."""
        screen = self.screen
        shape = screen._shapes[self.turtle.shapeIndex]
        ttype = shape._type
        titem = self.turtle._item
        if self._shown and screen._updatecounter == 0 and screen._tracing > 0:
            self._hidden_from_screen = False
            tshape = shape._data
            if ttype == "polygon":
                # The outline width depends on the resizemode.
                if self._resizemode == "noresize": w = 1
                elif self._resizemode == "auto": w = self._pensize
                else: w =self._outlinewidth
                shape = self._polytrafo(self._getshapepoly(tshape))
                fc, oc = self._fillcolor, self._pencolor
                screen._drawpoly(titem, shape, fill=fc, outline=oc,
                                                      width=w, top=True)
            elif ttype == "image":
                screen._drawimage(titem, self._position, tshape)
            elif ttype == "compound":
                # Each component polygon carries its own fill and
                # outline color.
                for item, (poly, fc, oc) in zip(titem, tshape):
                    poly = self._polytrafo(self._getshapepoly(poly, True))
                    screen._drawpoly(item, poly, fill=self._cc(fc),
                                     outline=self._cc(oc), width=self._outlinewidth, top=True)
        else:
            # Turtle hidden (or drawing deferred): blank its canvas
            # item(s), but only once per hide.
            if self._hidden_from_screen:
                return
            if ttype == "polygon":
                screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "")
            elif ttype == "image":
                screen._drawimage(titem, self._position,
                                          screen._shapes["blank"]._data)
            elif ttype == "compound":
                for item in titem:
                    screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "")
            self._hidden_from_screen = True
############################## stamp stuff ###############################
    def stamp(self):
        """Stamp a copy of the turtleshape onto the canvas and return its id.

        No argument.

        Stamp a copy of the turtle shape onto the canvas at the current
        turtle position. Return a stamp_id for that stamp, which can be
        used to delete it by calling clearstamp(stamp_id).

        Example (for a Turtle instance named turtle):
        >>> turtle.color("blue")
        >>> turtle.stamp()
        13
        >>> turtle.fd(50)
        """
        screen = self.screen
        shape = screen._shapes[self.turtle.shapeIndex]
        ttype = shape._type
        tshape = shape._data
        if ttype == "polygon":
            # Same rendering rules as _drawturtle, but drawn on a fresh
            # canvas item that stays on the screen.
            stitem = screen._createpoly()
            if self._resizemode == "noresize": w = 1
            elif self._resizemode == "auto": w = self._pensize
            else: w =self._outlinewidth
            shape = self._polytrafo(self._getshapepoly(tshape))
            fc, oc = self._fillcolor, self._pencolor
            screen._drawpoly(stitem, shape, fill=fc, outline=oc,
                                                  width=w, top=True)
        elif ttype == "image":
            stitem = screen._createimage("")
            screen._drawimage(stitem, self._position, tshape)
        elif ttype == "compound":
            # A compound stamp id is a tuple of canvas items, one per
            # component polygon.
            stitem = []
            for element in tshape:
                item = screen._createpoly()
                stitem.append(item)
            stitem = tuple(stitem)
            for item, (poly, fc, oc) in zip(stitem, tshape):
                poly = self._polytrafo(self._getshapepoly(poly, True))
                screen._drawpoly(item, poly, fill=self._cc(fc),
                                 outline=self._cc(oc), width=self._outlinewidth, top=True)
        self.stampItems.append(stitem)
        self.undobuffer.push(("stamp", stitem))
        return stitem
    def _clearstamp(self, stampid):
        """does the work for clearstamp() and clearstamps()
        """
        if stampid in self.stampItems:
            if isinstance(stampid, tuple):
                # Compound stamps consist of several canvas items.
                for subitem in stampid:
                    self.screen._delete(subitem)
            else:
                self.screen._delete(stampid)
            self.stampItems.remove(stampid)
        # Delete stampitem from undobuffer if necessary
        # if clearstamp is called directly.
        item = ("stamp", stampid)
        buf = self.undobuffer
        if item not in buf.buffer:
            return
        index = buf.buffer.index(item)
        buf.buffer.remove(item)
        # If the removed entry lies at or before the buffer pointer, move
        # the pointer back and insert a dummy entry so the remaining undo
        # entries stay aligned with the pointer.
        if index <= buf.ptr:
            buf.ptr = (buf.ptr - 1) % buf.bufsize
            buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
    def clearstamp(self, stampid):
        """Delete stamp with given stampid

        Argument:
        stampid - an integer, must be return value of previous stamp() call.

        Example (for a Turtle instance named turtle):
        >>> turtle.color("blue")
        >>> astamp = turtle.stamp()
        >>> turtle.fd(50)
        >>> turtle.clearstamp(astamp)
        """
        self._clearstamp(stampid)
        # Redraw so the removal becomes visible immediately.
        self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
    def _goto(self, end):
        """Move the pen to the point end, thereby drawing a line
        if pen is down. All other methods for turtle movement depend
        on this one.
        """
        ## Version with undo-stuff
        # Record enough state (pen mode, colors, line data) to let
        # _undogoto() reverse this move exactly.
        go_modes = ( self._drawing,
                     self._pencolor,
                     self._pensize,
                     isinstance(self._fillpath, list))
        screen = self.screen
        undo_entry = ("go", self._position, end, go_modes,
                      (self.currentLineItem,
                      self.currentLine[:],
                      screen._pointlist(self.currentLineItem),
                      self.items[:])
                      )
        if self.undobuffer:
            self.undobuffer.push(undo_entry)
        start = self._position
        if self._speed and screen._tracing == 1:
            # Animate: draw the segment in several hops whose number
            # depends on the distance (in pixels) and the speed setting.
            diff = (end-start)
            diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
            nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
            delta = diff * (1.0/nhops)
            for n in range(1, nhops):
                if n == 1:
                    top = True
                else:
                    top = False
                self._position = start + delta * n
                if self._drawing:
                    screen._drawline(self.drawingLineItem,
                                     (start, self._position),
                                     self._pencolor, self._pensize, top)
                self._update()
            if self._drawing:
                # Blank the temporary animation line item again.
                screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
                                 fill="", width=self._pensize)
        # Turtle now at end,
        if self._drawing: # now update currentLine
            self.currentLine.append(end)
        if isinstance(self._fillpath, list):
            self._fillpath.append(end)
        ###### inheritance!!!!!!!!!!!!!!!!!!!!!!
        self._position = end
        if self._creatingPoly:
            self._poly.append(end)
        if len(self.currentLine) > 42: # 42! answer to the ultimate question
                                       # of life, the universe and everything
            # Very long polylines slow down the canvas; split them up.
            self._newLine()
        self._update() #count=True)
    def _undogoto(self, entry):
        """Reverse a _goto. Used for undo()
        """
        old, new, go_modes, coodata = entry
        drawing, pc, ps, filling = go_modes
        cLI, cL, pl, items = coodata
        screen = self.screen
        if abs(self._position - new) > 0.5:
            # Sanity check; the message is German for
            # "HELLO - SOMETHING IS WRONG HERE!"
            print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!")
        # restore former situation
        self.currentLineItem = cLI
        self.currentLine = cL
        if pl == [(0, 0), (0, 0)]:
            usepc = ""
        else:
            usepc = pc
        screen._drawline(cLI, pl, fill=usepc, width=ps)
        # Remove the line items that were created after the undone move.
        todelete = [i for i in self.items if (i not in items) and
                                       (screen._type(i) == "line")]
        for i in todelete:
            screen._delete(i)
            self.items.remove(i)
        start = old
        if self._speed and screen._tracing == 1:
            # Animate the way back, mirroring _goto's animation.
            diff = old - new
            diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2
            nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))
            delta = diff * (1.0/nhops)
            for n in range(1, nhops):
                if n == 1:
                    top = True
                else:
                    top = False
                self._position = new + delta * n
                if drawing:
                    screen._drawline(self.drawingLineItem,
                                     (start, self._position),
                                     pc, ps, top)
                self._update()
            if drawing:
                screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),
                                 fill="", width=ps)
        # Turtle now at position old,
        self._position = old
        ## if undo is done during creating a polygon, the last vertex
        ## will be deleted. if the polygon is entirely deleted,
        ## creatingPoly will be set to False.
        ## Polygons created before the last one will not be affected by undo()
        if self._creatingPoly:
            if len(self._poly) > 0:
                self._poly.pop()
            if self._poly == []:
                self._creatingPoly = False
                self._poly = None
        if filling:
            if self._fillpath == []:
                self._fillpath = None
                # Defensive branch; the message is German for
                # "Improbable in _undogoto!"
                print("Unwahrscheinlich in _undogoto!")
            elif self._fillpath is not None:
                self._fillpath.pop()
        self._update() #count=True)
    def _rotate(self, angle):
        """Turns pen clockwise by angle.
        """
        if self.undobuffer:
            self.undobuffer.push(("rot", angle, self._degreesPerAU))
        # Convert from the user's angle units to degrees.
        angle *= self._degreesPerAU
        neworient = self._orient.rotate(angle)
        tracing = self.screen._tracing
        if tracing == 1 and self._speed > 0:
            # Animate the turn in steps of roughly 3*speed degrees each.
            anglevel = 3.0 * self._speed
            steps = 1 + int(abs(angle)/anglevel)
            delta = 1.0*angle/steps
            for _ in range(steps):
                self._orient = self._orient.rotate(delta)
                self._update()
        # Set the exact final orientation to avoid accumulated
        # rounding drift from the animation steps.
        self._orient = neworient
        self._update()
    def _newLine(self, usePos=True):
        """Closes current line item and starts a new one.
           Remark: if current line became too long, animation
           performance (via _drawline) slowed down considerably.
        """
        if len(self.currentLine) > 1:
            # Commit the finished polyline and allocate a fresh canvas
            # item for the next one.
            self.screen._drawline(self.currentLineItem, self.currentLine,
                                      self._pencolor, self._pensize)
            self.currentLineItem = self.screen._createline()
            self.items.append(self.currentLineItem)
        else:
            # Nothing drawn on the current item yet: just raise it.
            self.screen._drawline(self.currentLineItem, top=True)
        self.currentLine = []
        if usePos:
            # New line starts at the current position.
            self.currentLine = [self._position]
    def filling(self):
        """Return fillstate (True if filling, False else).

        No argument.

        Example (for a Turtle instance named turtle):
        >>> turtle.begin_fill()
        >>> if turtle.filling():
        ...     turtle.pensize(5)
        ... else:
        ...     turtle.pensize(3)
        """
        # While a fill is in progress _fillpath is the list of collected
        # vertices; outside a fill it is None.
        return isinstance(self._fillpath, list)
    def begin_fill(self):
        """Called just before drawing a shape to be filled.

        No argument.

        Example (for a Turtle instance named turtle):
        >>> turtle.color("black", "red")
        >>> turtle.begin_fill()
        >>> turtle.circle(60)
        >>> turtle.end_fill()
        """
        if not self.filling():
            # Create the canvas polygon item that end_fill() will paint.
            self._fillitem = self.screen._createpoly()
            self.items.append(self._fillitem)
        # Start (or restart) collecting vertices from the current position.
        self._fillpath = [self._position]
        self._newLine()
        if self.undobuffer:
            self.undobuffer.push(("beginfill", self._fillitem))
        self._update()
    def end_fill(self):
        """Fill the shape drawn after the call begin_fill().

        No argument.

        Example (for a Turtle instance named turtle):
        >>> turtle.color("black", "red")
        >>> turtle.begin_fill()
        >>> turtle.circle(60)
        >>> turtle.end_fill()
        """
        if self.filling():
            # A polygon needs at least 3 vertices to be painted.
            if len(self._fillpath) > 2:
                self.screen._drawpoly(self._fillitem, self._fillpath,
                                      fill=self._fillcolor)
                if self.undobuffer:
                    self.undobuffer.push(("dofill", self._fillitem))
            # Leave fill mode regardless of whether anything was painted.
            self._fillitem = self._fillpath = None
            self._update()
    def dot(self, size=None, *color):
        """Draw a dot with diameter size, using color.

        Optional arguments:
        size -- an integer >= 1 (if given)
        color -- a colorstring or a numeric color tuple

        Draw a circular dot with diameter size, using color.
        If size is not given, the maximum of pensize+4 and 2*pensize is used.

        Example (for a Turtle instance named turtle):
        >>> turtle.dot()
        >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
        """
        if not color:
            if isinstance(size, (str, tuple)):
                # Called as dot("blue") / dot((r,g,b)): the first
                # positional argument is really the color.
                color = self._colorstr(size)
                size = self._pensize + max(self._pensize, 4)
            else:
                color = self._pencolor
                if not size:
                    size = self._pensize + max(self._pensize, 4)
        else:
            if size is None:
                size = self._pensize + max(self._pensize, 4)
            color = self._colorstr(color)
        if hasattr(self.screen, "_dot"):
            # Screen supports dots natively as a single canvas item.
            item = self.screen._dot(self._position, size, color)
            self.items.append(item)
            if self.undobuffer:
                self.undobuffer.push(("dot", item))
        else:
            # Fallback: emulate the dot with a zero-length stroke of a
            # fat pen, restoring the full pen state afterwards.
            pen = self.pen()
            if self.undobuffer:
                # Group the emulation steps into one undoable action.
                self.undobuffer.push(["seq"])
                self.undobuffer.cumulate = True
            try:
                if self.resizemode() == 'auto':
                    self.ht()
                self.pendown()
                self.pensize(size)
                self.pencolor(color)
                self.forward(0)
            finally:
                self.pen(pen)
            if self.undobuffer:
                self.undobuffer.cumulate = False
    def _write(self, txt, align, font):
        """Performs the writing for write()
        """
        item, end = self.screen._write(self._position, txt, align, font,
                                       self._pencolor)
        # Remember the canvas item so clear()/undo() can delete it later.
        self.items.append(item)
        if self.undobuffer:
            self.undobuffer.push(("wri", item))
        # 'end' is the x-coordinate of the right edge of the written text.
        return end
    def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
        """Write text at the current turtle position.

        Arguments:
        arg -- info, which is to be written to the TurtleScreen
        move (optional) -- True/False
        align (optional) -- one of the strings "left", "center" or "right"
        font (optional) -- a triple (fontname, fontsize, fonttype)

        Write text - the string representation of arg - at the current
        turtle position according to align ("left", "center" or "right")
        and with the given font.
        If move is True, the pen is moved to the bottom-right corner
        of the text. By default, move is False.

        Example (for a Turtle instance named turtle):
        >>> turtle.write('Home = ', True, align="center")
        >>> turtle.write((0,0), True)
        """
        if self.undobuffer:
            # Group the write and the optional pen move into one
            # undoable compound action.
            self.undobuffer.push(["seq"])
            self.undobuffer.cumulate = True
        end = self._write(str(arg), align.lower(), font)
        if move:
            x, y = self.pos()
            self.setpos(end, y)
        if self.undobuffer:
            self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
    def end_poly(self):
        """Stop recording the vertices of a polygon.

        No argument.

        Stop recording the vertices of a polygon. Current turtle position is
        last point of polygon. This will be connected with the first point.

        Example (for a Turtle instance named turtle):
        >>> turtle.end_poly()
        """
        # The recorded vertices stay in self._poly for get_poly().
        self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
    def getscreen(self):
        """Return the TurtleScreen object, the turtle is drawing on.

        No argument.

        Return the TurtleScreen object, the turtle is drawing on.
        So TurtleScreen-methods can be called for that object.

        Example (for a Turtle instance named turtle):
        >>> ts = turtle.getscreen()
        >>> ts
        <turtle.TurtleScreen object at 0x0106B770>
        >>> ts.bgcolor("pink")
        """
        return self.screen
    def getturtle(self):
        """Return the Turtleobject itself.

        No argument.

        Only reasonable use: as a function to return the 'anonymous turtle':

        Example:
        >>> pet = getturtle()
        >>> pet.fd(50)
        >>> pet
        <turtle.Turtle object at 0x0187D810>
        >>> turtles()
        [<turtle.Turtle object at 0x0187D810>]
        """
        return self

    # Alias kept for backward compatibility with the old turtle module.
    getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
    def _delay(self, delay=None):
        """Set delay value which determines speed of turtle animation.
        """
        # Delegates to the screen: the delay is a per-screen setting.
        return self.screen.delay(delay)
    def onclick(self, fun, btn=1, add=None):
        """Bind fun to mouse-click event on this turtle on canvas.

        Arguments:
        fun -- a function with two arguments, to which will be assigned
               the coordinates of the clicked point on the canvas.
        btn -- number of the mouse-button defaults to 1 (left mouse button).
        add -- True or False. If True, new binding will be added, otherwise
               it will replace a former binding.

        Example for the anonymous turtle, i. e. the procedural way:

        >>> def turn(x, y):
        ...     left(360)
        ...
        >>> onclick(turn)  # Now clicking into the turtle will turn it.
        >>> onclick(None)  # event-binding will be removed
        """
        self.screen._onclick(self.turtle._item, fun, btn, add)
        self._update()
    def onrelease(self, fun, btn=1, add=None):
        """Bind fun to mouse-button-release event on this turtle on canvas.

        Arguments:
        fun -- a function with two arguments, to which will be assigned
               the coordinates of the clicked point on the canvas.
        btn -- number of the mouse-button defaults to 1 (left mouse button).

        Example (for a MyTurtle instance named joe):
        >>> class MyTurtle(Turtle):
        ...     def glow(self,x,y):
        ...         self.fillcolor("red")
        ...     def unglow(self,x,y):
        ...         self.fillcolor("")
        ...
        >>> joe = MyTurtle()
        >>> joe.onclick(joe.glow)
        >>> joe.onrelease(joe.unglow)

        Clicking on joe turns fillcolor red, unclicking turns it to
        transparent.
        """
        self.screen._onrelease(self.turtle._item, fun, btn, add)
        self._update()
    def ondrag(self, fun, btn=1, add=None):
        """Bind fun to mouse-move event on this turtle on canvas.

        Arguments:
        fun -- a function with two arguments, to which will be assigned
               the coordinates of the clicked point on the canvas.
        btn -- number of the mouse-button defaults to 1 (left mouse button).

        Every sequence of mouse-move-events on a turtle is preceded by a
        mouse-click event on that turtle.

        Example (for a Turtle instance named turtle):
        >>> turtle.ondrag(turtle.goto)

        Subsequently clicking and dragging a Turtle will move it
        across the screen thereby producing handdrawings (if pen is
        down).
        """
        self.screen._ondrag(self.turtle._item, fun, btn, add)
    def _undo(self, action, data):
        """Does the main part of the work for undo()
        """
        if self.undobuffer is None:
            return
        if action == "rot":
            # Reverse the rotation, converting the recorded angle-units
            # back through the current degree setting; the _rotate call
            # pushes a new undo entry, so pop it again.
            angle, degPAU = data
            self._rotate(-angle*degPAU/self._degreesPerAU)
            dummy = self.undobuffer.pop()
        elif action == "stamp":
            stitem = data[0]
            self.clearstamp(stitem)
        elif action == "go":
            self._undogoto(data)
        elif action in ["wri", "dot"]:
            # Remove the written text / dot canvas item.
            item = data[0]
            self.screen._delete(item)
            self.items.remove(item)
        elif action == "dofill":
            # Make the filled polygon invisible by redrawing it as a
            # degenerate, colorless triangle.
            item = data[0]
            self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)),
                                  fill="", outline="")
        elif action == "beginfill":
            # Abort fill mode and discard the (still empty) fill item.
            item = data[0]
            self._fillitem = self._fillpath = None
            if item in self.items:
                self.screen._delete(item)
                self.items.remove(item)
        elif action == "pen":
            # Restore a saved pen-state dict; the pen() call itself
            # pushes a new entry, so pop it again.
            TPen.pen(self, data[0])
            self.undobuffer.pop()
    def undo(self):
        """undo (repeatedly) the last turtle action.

        No argument.

        undo (repeatedly) the last turtle action.
        Number of available undo actions is determined by the size of
        the undobuffer.

        Example (for a Turtle instance named turtle):
        >>> for i in range(4):
        ...     turtle.fd(50); turtle.lt(80)
        ...
        >>> for i in range(8):
        ...     turtle.undo()
        ...
        """
        if self.undobuffer is None:
            return
        item = self.undobuffer.pop()
        action = item[0]
        data = item[1:]
        if action == "seq":
            # Compound entry: undo each sub-action, most recent first.
            while data:
                item = data.pop()
                self._undo(item[0], item[1:])
        else:
            self._undo(action, data)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
    """Return the singleton screen object.
    If none exists at the moment, create a new one and return it,
    else return the existing one."""
    # The singleton is cached on the Turtle class so that turtles and
    # the screen share a single instance.
    if Turtle._screen is None:
        Turtle._screen = _Screen()
    return Turtle._screen
class _Screen(TurtleScreen):
    """Singleton TurtleScreen subclass that creates and owns the Tk
    root window and its canvas."""

    _root = None
    _canvas = None
    _title = _CFG["title"]

    def __init__(self):
        # XXX there is no need for this code to be conditional,
        # as there will be only a single _Screen instance, anyway
        # XXX actually, the turtle demo is injecting root window,
        # so perhaps the conditional creation of a root should be
        # preserved (perhaps by passing it as an optional parameter)
        if _Screen._root is None:
            _Screen._root = self._root = _Root()
            self._root.title(_Screen._title)
            self._root.ondestroy(self._destroy)
        if _Screen._canvas is None:
            width = _CFG["width"]
            height = _CFG["height"]
            canvwidth = _CFG["canvwidth"]
            canvheight = _CFG["canvheight"]
            leftright = _CFG["leftright"]
            topbottom = _CFG["topbottom"]
            self._root.setupcanvas(width, height, canvwidth, canvheight)
            _Screen._canvas = self._root._getcanvas()
            TurtleScreen.__init__(self, _Screen._canvas)
            self.setup(width, height, leftright, topbottom)

    def setup(self, width=_CFG["width"], height=_CFG["height"],
              startx=_CFG["leftright"], starty=_CFG["topbottom"]):
        """ Set the size and position of the main window.

        Arguments:
        width: as integer a size in pixels, as float a fraction of the screen.
          Default is 50% of screen.
        height: as integer the height in pixels, as float a fraction of the
          screen. Default is 75% of screen.
        startx: if positive, starting position in pixels from the left
          edge of the screen, if negative from the right edge
          Default, startx=None is to center window horizontally.
        starty: if positive, starting position in pixels from the top
          edge of the screen, if negative from the bottom edge
          Default, starty=None is to center window vertically.

        Examples (for a Screen instance named screen):
        >>> screen.setup (width=200, height=200, startx=0, starty=0)

        sets window to 200x200 pixels, in upper left of screen

        >>> screen.setup(width=.75, height=0.5, startx=None, starty=None)

        sets window to 75% of screen by 50% of screen and centers
        """
        if not hasattr(self._root, "set_geometry"):
            return
        sw = self._root.win_width()
        sh = self._root.win_height()
        # Float sizes in [0, 1] are fractions of the physical screen.
        if isinstance(width, float) and 0 <= width <= 1:
            width = sw*width
        if startx is None:
            startx = (sw - width) / 2
        if isinstance(height, float) and 0 <= height <= 1:
            height = sh*height
        if starty is None:
            starty = (sh - height) / 2
        self._root.set_geometry(width, height, startx, starty)
        self.update()

    def title(self, titlestring):
        """Set title of turtle-window

        Argument:
        titlestring -- a string, to appear in the titlebar of the
                       turtle graphics window.

        This is a method of Screen-class. Not available for TurtleScreen-
        objects.

        Example (for a Screen instance named screen):
        >>> screen.title("Welcome to the turtle-zoo!")
        """
        if _Screen._root is not None:
            _Screen._root.title(titlestring)
        # Remember the title so a root window created later uses it too.
        _Screen._title = titlestring

    def _destroy(self):
        # Tear down all class-level singletons so a fresh Screen/Turtle
        # can be created after the window is closed.
        root = self._root
        if root is _Screen._root:
            Turtle._pen = None
            Turtle._screen = None
            _Screen._root = None
            _Screen._canvas = None
        TurtleScreen._RUNNING = True
        root.destroy()

    def bye(self):
        """Shut the turtlegraphics window.

        Example (for a TurtleScreen instance named screen):
        >>> screen.bye()
        """
        self._destroy()

    def exitonclick(self):
        """Go into mainloop until the mouse is clicked.

        No arguments.

        Bind bye() method to mouseclick on TurtleScreen.
        If "using_IDLE" - value in configuration dictionary is False
        (default value), enter mainloop.
        If IDLE with -n switch (no subprocess) is used, this value should be
        set to True in turtle.cfg. In this case IDLE's mainloop
        is active also for the client script.

        This is a method of the Screen-class and not available for
        TurtleScreen instances.

        Example (for a Screen instance named screen):
        >>> screen.exitonclick()

        """
        def exitGracefully(x, y):
            """Screen.bye() with two dummy-parameters"""
            self.bye()
        self.onclick(exitGracefully)
        if _CFG["using_IDLE"]:
            # IDLE's own mainloop already serves the events.
            return
        try:
            mainloop()
        except AttributeError:
            exit(0)
class Turtle(RawTurtle):
    """RawTurtle auto-creating (scrolled) canvas.

    When a Turtle object is created or a function derived from some
    Turtle method is called a TurtleScreen object is automatically created.
    """
    # Shared singletons: the 'anonymous' turtle and the screen.
    _pen = None
    _screen = None

    def __init__(self,
                 shape=_CFG["shape"],
                 undobuffersize=_CFG["undobuffersize"],
                 visible=_CFG["visible"]):
        if Turtle._screen is None:
            # Lazily create the singleton screen on first Turtle.
            Turtle._screen = Screen()
        RawTurtle.__init__(self, Turtle._screen,
                           shape=shape,
                           undobuffersize=undobuffersize,
                           visible=visible)
Pen = Turtle
def _getpen():
    """Return the 'anonymous' turtle, creating it on first use."""
    pen = Turtle._pen
    if pen is None:
        pen = Turtle._pen = Turtle()
    return pen
def _getscreen():
    """Return the singleton TurtleScreen, creating it on first use."""
    screen = Turtle._screen
    if screen is None:
        screen = Turtle._screen = Screen()
    return screen
def write_docstringdict(filename="turtle_docstringdict"):
    """Create and write docstring-dictionary to file.

    Optional argument:
    filename -- a string, used as filename
                default value is turtle_docstringdict

    Has to be called explicitly, (not used by the turtle-graphics classes)
    The docstring dictionary will be written to the Python script <filename>.py
    It is intended to serve as a template for translation of the docstrings
    into different languages.
    """
    docsdict = {}

    # Collect the docstrings of all exported screen and turtle methods.
    for methodname in _tg_screen_functions:
        key = "_Screen."+methodname
        docsdict[key] = eval(key).__doc__
    for methodname in _tg_turtle_functions:
        key = "Turtle."+methodname
        docsdict[key] = eval(key).__doc__

    # The with-statement closes the file; the original code also called
    # f.close() redundantly inside the with-block.
    with open("%s.py" % filename, "w") as f:
        # Aliases would produce duplicate entries - leave them out.
        keys = sorted(x for x in docsdict
                      if x.split('.')[1] not in _alias_list)
        f.write('docsdict = {\n\n')
        for key in keys[:-1]:
            f.write('%s :\n' % repr(key))
            f.write(' """%s\n""",\n\n' % docsdict[key])
        # The last entry gets no trailing comma.
        key = keys[-1]
        f.write('%s :\n' % repr(key))
        f.write(' """%s\n"""\n\n' % docsdict[key])
        f.write("}\n")
def read_docstrings(lang):
    """Read in docstrings from lang-specific docstring dictionary.

    Transfer docstrings, translated to lang, from a dictionary-file
    to the methods of classes Screen and Turtle and - in revised form -
    to the corresponding functions.
    """
    modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()}
    module = __import__(modname)
    docsdict = module.docsdict
    for key in docsdict:
        try:
            # eval(key).im_func.__doc__ = docsdict[key]
            eval(key).__doc__ = docsdict[key]
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt and
            # SystemExit still propagate; a broken entry is only reported.
            print("Bad docstring-entry: %s" % key)
# Load translated docstrings if a non-english language is configured.
_LANGUAGE = _CFG["language"]

try:
    if _LANGUAGE != "english":
        read_docstrings(_LANGUAGE)
except ImportError:
    print("Cannot find docsdict for", _LANGUAGE)
except Exception:
    # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
    print("Unknown Error when trying to import %s-docstring-dictionary" %
          _LANGUAGE)
def getmethparlist(ob):
    """Get strings describing the arguments for the given object

    Returns a pair of strings representing function parameter lists
    including parenthesis.  The first string is suitable for use in
    function definition and the second is suitable for use in function
    call.  The "self" parameter is not included.
    """
    # bit of a hack for methods - turn it into a function
    # but we drop the "self" param.
    # Try and build one for Python defined functions
    args, varargs, varkw = inspect.getargs(ob.__code__)
    items2 = args[1:]
    realArgs = args[1:]
    defaults = ob.__defaults__ or []
    defaults = ["=%r" % (value,) for value in defaults]
    # Left-pad with empty strings so the defaults line up with the
    # trailing parameters.
    defaults = [""] * (len(realArgs) - len(defaults)) + defaults
    items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]
    if varargs is not None:
        items1.append("*" + varargs)
        items2.append("*" + varargs)
    if varkw is not None:
        items1.append("**" + varkw)
        items2.append("**" + varkw)
    # (Dead "defText = callText = ''" initializers removed.)
    defText = "(%s)" % ", ".join(items1)
    callText = "(%s)" % ", ".join(items2)
    return defText, callText
def _turtle_docrevise(docstr):
    """To reduce docstrings from RawTurtle class for functions
    """
    import re
    if docstr is None:
        return None
    example_name = _CFG["exampleturtle"]
    # Drop the "turtle." call prefix, then strip the
    # "(for a Turtle instance named turtle)" hint from headings.
    revised = docstr.replace("%s." % example_name, "")
    heading = re.compile(r' \(.+ %s\):' % example_name)
    return heading.sub(":", revised)
def _screen_docrevise(docstr):
    """To reduce docstrings from TurtleScreen class for functions
    """
    import re
    if docstr is None:
        return None
    example_name = _CFG["examplescreen"]
    # Drop the "screen." call prefix, then strip the
    # "(for a Screen instance named screen)" hint from headings.
    revised = docstr.replace("%s." % example_name, "")
    heading = re.compile(r' \(.+ %s\):' % example_name)
    return heading.sub(":", revised)
## The following mechanism makes all methods of RawTurtle and Turtle available
## as functions. So we can enhance, change, add, delete methods to these
## classes and do not need to change anything here.
for methodname in _tg_screen_functions:
    pl1, pl2 = getmethparlist(eval('_Screen.' + methodname))
    if pl1 == "":
        # No parameter list could be derived - report and skip this name.
        print(">>>>>>", pl1, pl2)
        continue
    # Define a module-level function that forwards to the singleton screen.
    defstr = ("def %(key)s%(pl1)s: return _getscreen().%(key)s%(pl2)s" %
              {'key':methodname, 'pl1':pl1, 'pl2':pl2})
    exec(defstr)
    eval(methodname).__doc__ = _screen_docrevise(eval('_Screen.'+methodname).__doc__)
for methodname in _tg_turtle_functions:
    pl1, pl2 = getmethparlist(eval('Turtle.' + methodname))
    if pl1 == "":
        # No parameter list could be derived - report and skip this name.
        print(">>>>>>", pl1, pl2)
        continue
    # Define a module-level function that forwards to the anonymous turtle.
    defstr = ("def %(key)s%(pl1)s: return _getpen().%(key)s%(pl2)s" %
              {'key':methodname, 'pl1':pl1, 'pl2':pl2})
    exec(defstr)
    eval(methodname).__doc__ = _turtle_docrevise(eval('Turtle.'+methodname).__doc__)
done = mainloop
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
    def demo1():
        """Demo of old turtle.py - module

        Draws three squares (the last one filled), writes some text and
        draws an open and a filled staircase, using only the classic
        procedural interface.
        """
        reset()
        tracer(True)
        up()
        backward(100)
        down()
        # draw 3 squares; the last filled
        width(3)
        for i in range(3):
            if i == 2:
                begin_fill()
            for _ in range(4):
                forward(20)
                left(90)
            if i == 2:
                color("maroon")
                end_fill()
            up()
            forward(30)
            down()
        width(1)
        color("black")
        # move out of the way
        tracer(False)
        up()
        right(90)
        forward(100)
        right(90)
        forward(100)
        right(180)
        down()
        # some text
        write("startstart", 1)
        write("start", 1)
        color("red")
        # staircase
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        # filled staircase
        tracer(True)
        begin_fill()
        for i in range(5):
            forward(20)
            left(90)
            forward(20)
            right(90)
        end_fill()
        # more text
    def demo2():
        """Demo of some new features.

        Shows undo, colormode(255) fills, speed changes, a second Turtle
        chasing the anonymous one, stamps, and a clickable exit binding.
        """
        speed(1)
        st()
        pensize(3)
        setheading(towards(0, 0))
        radius = distance(0, 0)/2.0
        rt(90)
        # half-circle back to the origin, toggling the pen on the way
        for _ in range(18):
            switchpen()
            circle(radius, 10)
        write("wait a moment...")
        # undo everything drawn so far
        while undobufferentries():
            undo()
        reset()
        lt(90)
        colormode(255)
        laenge = 10
        pencolor("green")
        pensize(3)
        lt(180)
        # spiral of growing triangles, filled with shifting colors
        for i in range(-2, 16):
            if i > 0:
                begin_fill()
                fillcolor(255-15*i, 0, 15*i)
            for _ in range(3):
                fd(laenge)
                lt(120)
            end_fill()
            laenge += 10
            lt(15)
            speed((speed()+1)%12)
        #end_fill()
        lt(120)
        pu()
        fd(70)
        rt(30)
        pd()
        color("red","yellow")
        speed(0)
        # four-petal filled flower
        begin_fill()
        for _ in range(4):
            circle(50, 90)
            rt(90)
            fd(30)
            rt(90)
        end_fill()
        lt(90)
        pu()
        fd(30)
        pd()
        shape("turtle")

        # 'tri' chases a second turtle across the screen
        tri = getturtle()
        tri.resizemode("auto")
        turtle = Turtle()
        turtle.resizemode("auto")
        turtle.shape("turtle")
        turtle.reset()
        turtle.left(90)
        turtle.speed(0)
        turtle.up()
        turtle.goto(280, 40)
        turtle.lt(30)
        turtle.down()
        turtle.speed(6)
        turtle.color("blue","orange")
        turtle.pensize(2)
        tri.speed(6)
        setheading(towards(turtle))
        count = 1
        while tri.distance(turtle) > 4:
            turtle.fd(3.5)
            turtle.lt(0.6)
            tri.setheading(tri.towards(turtle))
            tri.fd(4)
            if count % 20 == 0:
                turtle.stamp()
                tri.stamp()
                switchpen()
            count += 1
        tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
        tri.pencolor("black")
        tri.pencolor("red")

        def baba(xdummy, ydummy):
            # click handler: clear everything and close the window
            clearscreen()
            bye()

        time.sleep(2)

        # undo the whole chase, then invite a closing click
        while undobufferentries():
            tri.undo()
            turtle.undo()
        tri.fd(50)
        tri.write(" Click me!", font = ("Courier", 12, "bold") )
        tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
| {
"content_hash": "470a83bea6e7dbba27e2944c5b36bc21",
"timestamp": "",
"source": "github",
"line_count": 4116,
"max_line_length": 109,
"avg_line_length": 34.629737609329446,
"alnum_prop": 0.5504995229275411,
"repo_name": "samuelhavron/heroku-buildpack-python",
"id": "f4400c90fd7345d3311f1663c704b0efbdb74880",
"size": "143547",
"binary": false,
"copies": "68",
"ref": "refs/heads/master",
"path": "Python-3.4.3/Lib/turtle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "Batchfile",
"bytes": "18943"
},
{
"name": "C",
"bytes": "16647302"
},
{
"name": "C++",
"bytes": "176362"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "255056"
},
{
"name": "HTML",
"bytes": "130855"
},
{
"name": "JavaScript",
"bytes": "10598"
},
{
"name": "M4",
"bytes": "214312"
},
{
"name": "Makefile",
"bytes": "196708"
},
{
"name": "Objective-C",
"bytes": "33060"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24212132"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "488285"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the isosurface 'autocolorscale' property."""

    def __init__(
        self, plotly_name="autocolorscale", parent_name="isosurface", **kwargs
    ):
        # Pull the defaults out of kwargs first so explicit caller values win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(AutocolorscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
| {
"content_hash": "4a8b28cc0a12e938c3ddf9887a5c9184",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 35.357142857142854,
"alnum_prop": 0.6161616161616161,
"repo_name": "plotly/plotly.py",
"id": "686c15e377e8b181307973815d8ab24ea8989fc2",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/_autocolorscale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
__author__ = 'privat'
from PyQt4.QtGui import QTextCursor
class OutputWriter():
    def __init__(self, text_edit):
        # Target text widget (presumably a QTextEdit - verify at caller);
        # it is cleared so the writer starts with an empty log.
        self.text_edit = text_edit
        self.text_edit.setPlainText('')
def write(self, text):
self.text_edit.moveCursor(QTextCursor.End)
self.text_edit.insertPlainText(text) | {
"content_hash": "299538644bedd473a64a1bed93af22da",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 50,
"avg_line_length": 22.642857142857142,
"alnum_prop": 0.6498422712933754,
"repo_name": "sem23/roslab_ide",
"id": "4ce920a441c6715d7b41316536a0a37965687310",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/hydro",
"path": "src/roslab_ide/helper/OutputWriter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196906"
}
],
"symlink_target": ""
} |
"""Tests for genomics.deepvariant.model_eval."""
import os
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import six
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from deepvariant import data_providers_test
from deepvariant import model_eval
from deepvariant import testdata
from deepvariant.testing import tf_test_utils
FLAGS = flags.FLAGS
# Note that this test suite is invoked twice, with --use_tpu set both ways.
def setUpModule():
  # Initialize the shared test data once for the whole test module.
  testdata.init()
class ModelEvalTest(
    six.with_metaclass(parameterized.TestGeneratorMetaclass, tf.test.TestCase)):
  """Tests for model_eval: end-to-end runs and fixed-model metric checks."""

  def setUp(self):
    self.checkpoint_dir = tf.compat.v1.test.get_temp_dir()
    # Use this to generate a random name. The framework
    # will create the directory under self.checkpoint_dir.
    self.eval_name = os.path.basename(tf.compat.v1.test.get_temp_dir())

  @parameterized.parameters(['inception_v3'])
  @flagsaver.flagsaver
  @mock.patch('deepvariant.data_providers.'
              'get_input_fn_from_dataset')
  def test_end2end(self, model_name, mock_get_input_fn_from_dataset):
    """End-to-end test of model_eval."""
    tf_test_utils.write_fake_checkpoint('inception_v3', self.test_session(),
                                        self.checkpoint_dir,
                                        FLAGS.moving_average_decay)

    # Start up eval, loading that checkpoint.
    FLAGS.batch_size = 2
    FLAGS.checkpoint_dir = self.checkpoint_dir
    FLAGS.eval_name = self.eval_name
    FLAGS.max_ckpt_to_evaluate = 0
    FLAGS.max_examples = 2
    FLAGS.best_checkpoint_metric = 'F1/All'
    FLAGS.model_name = model_name
    FLAGS.dataset_config_pbtxt = '/path/to/mock.pbtxt'
    FLAGS.master = ''
    # Always try to read in compressed inputs to stress that case. Uncompressed
    # inputs are certain to work. This test is expensive to run, so we want to
    # minimize the number of times we need to run this.
    mock_get_input_fn_from_dataset.return_value = (
        data_providers_test.make_golden_dataset(
            compressed_inputs=True, use_tpu=FLAGS.use_tpu))
    model_eval.main(0)
    mock_get_input_fn_from_dataset.assert_called_once_with(
        dataset_config_filename=FLAGS.dataset_config_pbtxt,
        mode=tf_estimator.ModeKeys.EVAL,
        use_tpu=FLAGS.use_tpu)
    # Eval must have recorded the best checkpoint and its metrics ...
    self.assertTrue(
        tf_test_utils.check_file_exists(
            'best_checkpoint.txt', eval_name=self.eval_name))
    self.assertTrue(
        tf_test_utils.check_file_exists(
            'best_checkpoint.metrics', eval_name=self.eval_name))
    # ... and evaluated the single checkpoint at global_step 0.
    self.assertEqual(
        tf.train.load_checkpoint(self.checkpoint_dir).get_tensor('global_step'),
        0)

  # Using a constant model, check that running an eval returns the expected
  # metrics.
  @flagsaver.flagsaver
  @mock.patch(
      'deepvariant.model_eval.checkpoints_iterator')
  @mock.patch('deepvariant.data_providers.'
              'get_input_fn_from_dataset')
  def test_fixed_eval_sees_the_same_evals(self, mock_get_input_fn_from_dataset,
                                          mock_checkpoints_iterator):
    dataset = data_providers_test.make_golden_dataset(use_tpu=FLAGS.use_tpu)
    n_checkpoints = 3
    # Three identical 'constant' checkpoints at global steps 0, 1, 2.
    checkpoints = [
        tf_test_utils.write_fake_checkpoint(
            'constant',
            self.test_session(),
            self.checkpoint_dir,
            FLAGS.moving_average_decay,
            global_step=i,
            name='model' + str(i)) for i in range(n_checkpoints)
    ]

    # Setup our mocks.
    mock_checkpoints_iterator.return_value = checkpoints
    mock_get_input_fn_from_dataset.return_value = dataset

    # Start up eval, loading that checkpoint.
    FLAGS.batch_size = 2
    FLAGS.checkpoint_dir = self.checkpoint_dir
    FLAGS.eval_name = self.eval_name
    FLAGS.max_ckpt_to_evaluate = n_checkpoints - 1
    FLAGS.model_name = 'constant'
    FLAGS.dataset_config_pbtxt = '/path/to/mock.pbtxt'
    FLAGS.master = ''
    model_eval.main(0)

    # The last checkpoint evaluated should be the final one.
    self.assertEqual(
        tf.train.load_checkpoint(self.checkpoint_dir).get_tensor('global_step'),
        n_checkpoints - 1)
    self.assertEqual(mock_get_input_fn_from_dataset.call_args_list, [
        mock.call(
            use_tpu=FLAGS.use_tpu,
            dataset_config_filename=FLAGS.dataset_config_pbtxt,
            mode=tf_estimator.ModeKeys.EVAL)
    ])
    metrics = [
        model_eval.read_metrics(checkpoint, eval_name=FLAGS.eval_name)
        for checkpoint in checkpoints
    ]

    # Check that our metrics are what we expect them to be.
    # See internal for details on how to compute these counts:
    # Counts of labels in our golden dataset:
    # 1 0
    # 12 1
    # 35 2
    expected_values_for_all_exact = {
        # We have 12 correct calls [there are 12 variants with a label of 1] and
        # 1 label 0 + 35 with a label of 2, so we have an accuracy of 12 / 48,
        # which is 0.25.
        'Accuracy/All': 0.25,
        # We don't have any FNs because we call everything het.
        'FNs/All': 0,
        # Two of our labels are 0, which we call het, giving us 2 FP.
        'FPs/All': 1.0,
        # We call everything as het, so the recall has to be 1.
        'Recall/All': 1.0,
        # TODO: include this metric when we upgrade to TF 1.5.
        # # We don't call anything but hets, so TNs has to be 0.
        # 'TNs/All': 0,
        # We find 47 positives, so this has to be 47.
        'TPs/All': 47,
    }
    for key, expected_value in expected_values_for_all_exact.items():
      print(str(key) + '=' + str(metrics[0][key]))
    for key, expected_value in expected_values_for_all_exact.items():
      self.assertEqual(metrics[0][key], expected_value)

    expected_values_for_all_close = {
        # We called 47 / 48 correctly ~ 0.979167
        'Precision/All': 0.979167,
        # We called (2 * 47 / 48) / (1 + 47 / 48) correctly ~ 0.989474
        'F1/All': 0.989474,
    }
    for key, expected_value in expected_values_for_all_close.items():
      self.assertAlmostEqual(metrics[0][key], expected_value, places=6)

    # Identical checkpoints must yield identical metrics.
    for m1, m2 in zip(metrics, metrics[1:]):
      # Remove global_step from comparison first.
      m1.pop('global_step', None)
      m2.pop('global_step', None)
      self.assertEqual(m1, m2)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "4249200a17759c31bb0d752b0a67290d",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 36.71264367816092,
"alnum_prop": 0.6476205385097057,
"repo_name": "google/deepvariant",
"id": "3296d72e0c4fbf2d99ed6dba805130dd7a28d833",
"size": "7910",
"binary": false,
"copies": "1",
"ref": "refs/heads/r1.4",
"path": "deepvariant/model_eval_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "587559"
},
{
"name": "Dockerfile",
"bytes": "9270"
},
{
"name": "Python",
"bytes": "1617393"
},
{
"name": "Shell",
"bytes": "91210"
},
{
"name": "Starlark",
"bytes": "75694"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Symbol(_BaseLayoutHierarchyType):
    """Style options for a mapbox layer of ``type`` "symbol" (icon + text)."""

    # Location of this node within the figure hierarchy.
    _parent_path_str = "layout.mapbox.layer"
    _path_str = "layout.mapbox.layer.symbol"
    _valid_props = {"icon", "iconsize", "placement", "text", "textfont", "textposition"}

    @property
    def icon(self):
        """
        Sets the symbol icon image (mapbox.layer.layout.icon-image).
        Full list: https://www.mapbox.com/maki-icons/

        The 'icon' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["icon"]

    @icon.setter
    def icon(self, value):
        self["icon"] = value

    @property
    def iconsize(self):
        """
        Sets the symbol icon size (mapbox.layer.layout.icon-size). Has
        an effect only when `type` is set to "symbol".

        The 'iconsize' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["iconsize"]

    @iconsize.setter
    def iconsize(self, value):
        self["iconsize"] = value

    @property
    def placement(self):
        """
        Sets the symbol and/or text placement
        (mapbox.layer.layout.symbol-placement). If `placement` is
        "point", the label is placed where the geometry is located If
        `placement` is "line", the label is placed along the line of
        the geometry If `placement` is "line-center", the label is
        placed on the center of the geometry

        The 'placement' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['point', 'line', 'line-center']

        Returns
        -------
        Any
        """
        return self["placement"]

    @placement.setter
    def placement(self, value):
        self["placement"] = value

    @property
    def text(self):
        """
        Sets the symbol text (mapbox.layer.layout.text-field).

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, value):
        self["text"] = value

    @property
    def textfont(self):
        """
        Sets the icon text font (color=mapbox.layer.paint.text-color,
        size=mapbox.layer.layout.text-size). Has an effect only when
        `type` is set to "symbol".

        The 'textfont' property is an instance of Textfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.layout.mapbox.layer.symbol.Textfont`
          - A dict of string/value properties that will be passed
            to the Textfont constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans",, "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.layout.mapbox.layer.symbol.Textfont
        """
        return self["textfont"]

    @textfont.setter
    def textfont(self, value):
        self["textfont"] = value

    @property
    def textposition(self):
        """
        Sets the positions of the `text` elements with respects to the
        (x,y) coordinates.

        The 'textposition' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top left', 'top center', 'top right', 'middle left',
                'middle center', 'middle right', 'bottom left', 'bottom
                center', 'bottom right']

        Returns
        -------
        Any
        """
        return self["textposition"]

    @textposition.setter
    def textposition(self, value):
        self["textposition"] = value

    @property
    def _prop_descriptions(self):
        return """\
        icon
            Sets the symbol icon image (mapbox.layer.layout.icon-
            image). Full list: https://www.mapbox.com/maki-icons/
        iconsize
            Sets the symbol icon size (mapbox.layer.layout.icon-
            size). Has an effect only when `type` is set to
            "symbol".
        placement
            Sets the symbol and/or text placement
            (mapbox.layer.layout.symbol-placement). If `placement`
            is "point", the label is placed where the geometry is
            located If `placement` is "line", the label is placed
            along the line of the geometry If `placement` is "line-
            center", the label is placed on the center of the
            geometry
        text
            Sets the symbol text (mapbox.layer.layout.text-field).
        textfont
            Sets the icon text font (color=mapbox.layer.paint.text-
            color, size=mapbox.layer.layout.text-size). Has an
            effect only when `type` is set to "symbol".
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        """

    def __init__(
        self,
        arg=None,
        icon=None,
        iconsize=None,
        placement=None,
        text=None,
        textfont=None,
        textposition=None,
        **kwargs,
    ):
        """
        Construct a new Symbol object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.mapbox.layer.Symbol`
        icon
            Sets the symbol icon image (mapbox.layer.layout.icon-
            image). Full list: https://www.mapbox.com/maki-icons/
        iconsize
            Sets the symbol icon size (mapbox.layer.layout.icon-
            size). Has an effect only when `type` is set to
            "symbol".
        placement
            Sets the symbol and/or text placement
            (mapbox.layer.layout.symbol-placement). If `placement`
            is "point", the label is placed where the geometry is
            located If `placement` is "line", the label is placed
            along the line of the geometry If `placement` is "line-
            center", the label is placed on the center of the
            geometry
        text
            Sets the symbol text (mapbox.layer.layout.text-field).
        textfont
            Sets the icon text font (color=mapbox.layer.paint.text-
            color, size=mapbox.layer.layout.text-size). Has an
            effect only when `type` is set to "symbol".
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.

        Returns
        -------
        Symbol
        """
        super().__init__("symbol")

        # Fast path used internally when re-parenting an existing node.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.mapbox.layer.Symbol
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.layer.Symbol`"""
            )

        # Validation flags consumed before forwarding remaining kwargs.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties; an explicit keyword argument wins over `arg`.
        # Each key is popped from `arg` even when overridden, matching the
        # generated-code behavior.
        explicit = {
            "icon": icon,
            "iconsize": iconsize,
            "placement": placement,
            "text": text,
            "textfont": textfont,
            "textposition": textposition,
        }
        for prop, override in explicit.items():
            candidate = arg.pop(prop, None)
            if override is not None:
                candidate = override
            if candidate is not None:
                self[prop] = candidate

        # Forward anything we did not recognize.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
| {
"content_hash": "60ad50e1798a5abe4ad8115d4a69a7c1",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 89,
"avg_line_length": 32.25714285714286,
"alnum_prop": 0.537643932683791,
"repo_name": "plotly/plotly.py",
"id": "dc3ed43c4458c77116537d6e08d4f8ae2e5e74cf",
"size": "10161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/_symbol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
© 2014 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from collections import defaultdict
from copy import copy
import math
from luminol import exceptions
from luminol.algorithms.anomaly_detector_algorithms import AnomalyDetectorAlgorithm
from luminol.constants import *
from luminol.modules.time_series import TimeSeries
class BitmapDetector(AnomalyDetectorAlgorithm):
    """
    Bitmap Algorithm.
    This method breaks time series into chunks and uses the frequency of similar chunks
    to determine anomaly scores.
    The ideas are from this paper:
    Assumption-Free Anomaly Detection in Time Series(http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).
    """
    def __init__(self, time_series, baseline_time_series=None, precision=None, lag_window_size=None,
                 future_window_size=None, chunk_size=None):
        """
        Initializer
        :param TimeSeries time_series: a TimeSeries object.
        :param TimeSeries baseline_time_series: baseline TimeSeries.
        :param int precision: how many sections to categorize values.
        :param int lag_window_size: lagging window size.
        :param int future_window_size: future window size.
        :param int chunk_size: chunk size.
        """
        super(BitmapDetector, self).__init__(self.__class__.__name__, time_series, baseline_time_series)
        # Fall back to defaults for non-positive or missing parameters.
        self.precision = precision if precision and precision > 0 else DEFAULT_BITMAP_PRECISION
        self.chunk_size = chunk_size if chunk_size and chunk_size > 0 else DEFAULT_BITMAP_CHUNK_SIZE
        if lag_window_size:
            self.lag_window_size = lag_window_size
        else:
            # Default window sizes are a fixed percentage of the series length.
            self.lag_window_size = int(self.time_series_length * DEFAULT_BITMAP_LAGGING_WINDOW_SIZE_PCT)
        if future_window_size:
            self.future_window_size = future_window_size
        else:
            self.future_window_size = int(self.time_series_length * DEFAULT_BITMAP_LEADING_WINDOW_SIZE_PCT)
        self._sanity_check()
    def _sanity_check(self):
        """
        Check if there are enough data points.
        """
        windows = self.lag_window_size + self.future_window_size
        # Either window being 0/None, or too few points overall, makes the
        # algorithm undefined.
        if (not self.lag_window_size or not self.future_window_size
                or self.time_series_length < windows or windows < DEFAULT_BITMAP_MINIMAL_POINTS_IN_WINDOWS):
            raise exceptions.NotEnoughDataPoints
        # If window size is too big, too many data points will be assigned a score of 0 in the first lag window
        # and the last future window.
        if self.lag_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
            self.lag_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
        if self.future_window_size > DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS:
            self.future_window_size = DEFAULT_BITMAP_MAXIMAL_POINTS_IN_WINDOWS
    def _generate_SAX_single(self, sections, value):
        """
        Generate SAX representation(Symbolic Aggregate approXimation) for a single data point.
        Read more about it here: Assumption-Free Anomaly Detection in Time Series(http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).
        :param dict sections: value sections.
        :param float value: value to be categorized.
        :return str: a SAX representation.
        """
        # Pick the highest section whose lower bound is <= value.
        # NOTE(review): iteration assumes sections keys come back in ascending
        # order, which holds for the insertion-ordered dict built in
        # _generate_SAX (guaranteed on Python 3.7+).
        sax = 0
        for section_number in sections.keys():
            section_lower_bound = sections[section_number]
            if value >= section_lower_bound:
                sax = section_number
            else:
                break
        return str(sax)
    def _generate_SAX(self):
        """
        Generate SAX representation for all values of the time series.
        """
        sections = {}
        self.value_min = self.time_series.min()
        self.value_max = self.time_series.max()
        # Break the whole value range into different sections.
        # NOTE(review): under Python 2 this was integer division when the
        # inputs were ints; under Python 3 it is float division.
        section_height = (self.value_max - self.value_min) / self.precision
        for section_number in range(self.precision):
            sections[section_number] = self.value_min + section_number * section_height
        # Generate SAX representation.
        self.sax = ''.join(self._generate_SAX_single(sections, value) for value in self.time_series.values)
    def _construct_SAX_chunk_dict(self, sax):
        """
        Form a chunk frequency dictionary from a SAX representation.
        :param str sax: a SAX representation.
        :return dict: frequency dictionary for chunks in the SAX representation.
        """
        frequency = defaultdict(int)
        chunk_size = self.chunk_size
        length = len(sax)
        # Count every full-size sliding chunk (shorter tails are skipped).
        for i in range(length):
            if i + chunk_size <= length:
                chunk = sax[i: i + chunk_size]
                frequency[chunk] += 1
        return frequency
    def _construct_all_SAX_chunk_dict(self):
        """
        Construct the chunk dicts for lagging window and future window at each index.
        e.g: Suppose we have a SAX sequence as '1234567890', both window sizes are 3, and the chunk size is 2.
        The first index that has a lagging window is 3. For index equals 3, the lagging window has sequence '123',
        the chunk to leave lagging window(lw_leave_chunk) is '12', and the chunk to enter lagging window(lw_enter_chunk) is '34'.
        Therefore, given chunk dicts at i, to compute chunk dicts at i+1, simply decrement the count for lw_leave_chunk,
        and increment the count for lw_enter_chunk from chunk dicts at i. Same method applies to future window as well.
        """
        lag_dicts = {}
        fut_dicts = {}
        length = self.time_series_length
        lws = self.lag_window_size
        fws = self.future_window_size
        chunk_size = self.chunk_size
        for i in range(length):
            # If i is too small or too big, there will be no chunk dicts.
            if i < lws or i > length - fws:
                lag_dicts[i] = None
            else:
                # Just enter valid range.
                if lag_dicts[i - 1] is None:
                    # Build the first pair of dicts from scratch; every later
                    # index is derived incrementally in the else-branch below.
                    lag_dict = self._construct_SAX_chunk_dict(self.sax[i - lws: i])
                    lag_dicts[i] = lag_dict
                    lw_leave_chunk = self.sax[0:chunk_size]
                    lw_enter_chunk = self.sax[i - chunk_size + 1: i + 1]
                    fut_dict = self._construct_SAX_chunk_dict(self.sax[i: i + fws])
                    fut_dicts[i] = fut_dict
                    fw_leave_chunk = self.sax[i: i + chunk_size]
                    fw_enter_chunk = self.sax[i + fws + 1 - chunk_size: i + fws + 1]
                else:
                    # Update dicts according to leave_chunks and enter_chunks.
                    lag_dict = copy(lag_dicts[i - 1])
                    lag_dict[lw_leave_chunk] -= 1
                    lag_dict[lw_enter_chunk] += 1
                    lag_dicts[i] = lag_dict
                    fut_dict = copy(fut_dicts[i - 1])
                    fut_dict[fw_leave_chunk] -= 1
                    fut_dict[fw_enter_chunk] += 1
                    fut_dicts[i] = fut_dict
                    # Update leave_chunks and enter_chunks for the next index.
                    lw_leave_chunk = self.sax[i - lws: i - lws + chunk_size]
                    lw_enter_chunk = self.sax[i - chunk_size + 1: i + 1]
                    fw_leave_chunk = self.sax[i: i + chunk_size]
                    fw_enter_chunk = self.sax[i + fws + 1 - chunk_size: i + fws + 1]
        self.lag_dicts = lag_dicts
        self.fut_dicts = fut_dicts
    def _compute_anom_score_between_two_windows(self, i):
        """
        Compute distance difference between two windows' chunk frequencies,
        which is then marked as the anomaly score of the data point on the window boundary in the middle.
        :param int i: index of the data point between two windows.
        :return float: the anomaly score.
        """
        lag_window_chunk_dict = self.lag_dicts[i]
        future_window_chunk_dict = self.fut_dicts[i]
        # Squared Euclidean distance over the union of chunk keys.
        score = 0
        for chunk in lag_window_chunk_dict:
            if chunk in future_window_chunk_dict:
                score += math.pow(future_window_chunk_dict[chunk] - lag_window_chunk_dict[chunk], 2)
            else:
                score += math.pow(lag_window_chunk_dict[chunk], 2)
        for chunk in future_window_chunk_dict:
            if chunk not in lag_window_chunk_dict:
                score += math.pow(future_window_chunk_dict[chunk], 2)
        return score
    def _set_scores(self):
        """
        Compute anomaly scores for the time series by sliding both lagging window and future window.
        """
        anom_scores = {}
        self._generate_SAX()
        self._construct_all_SAX_chunk_dict()
        length = self.time_series_length
        lws = self.lag_window_size
        fws = self.future_window_size
        for i, timestamp in enumerate(self.time_series.timestamps):
            # Points without both windows get a score of 0 (same guard as in
            # _construct_all_SAX_chunk_dict).
            if i < lws or i > length - fws:
                anom_scores[timestamp] = 0
            else:
                anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
        self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
| {
"content_hash": "9433b331fa03ffb4ae896542dafd9631",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 126,
"avg_line_length": 41.908212560386474,
"alnum_prop": 0.675043227665706,
"repo_name": "forever342/naarad",
"id": "41ef4c80636747f1aa391423d43bb09367bce135",
"size": "8691",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/luminol/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "83289"
},
{
"name": "CSS",
"bytes": "4420"
},
{
"name": "HTML",
"bytes": "41597"
},
{
"name": "JavaScript",
"bytes": "42346"
},
{
"name": "Python",
"bytes": "381979"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from fnmatch import fnmatch
from urlobject import URLObject
from django import template
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.text import get_text_list
import ttag
from sorter.conf import settings
from sorter.utils import cycle_pairs
register = template.Library()
class SorterAsTag(ttag.helpers.AsTag):
    """Base "as"-style tag that requires ``request`` in the template context."""

    def clean(self, data, context):
        """Validate the tag data, ensuring the context carries ``request``."""
        if not context.get('request'):
            raise TemplateSyntaxError("Couldn't find request in context: %s" %
                                      context)
        return super(SorterAsTag, self).clean(data, context)

    def clean_with(self, value):
        """Normalize the sort-query name, prefixing the default name if needed."""
        if not isinstance(value, str):
            raise TemplateSyntaxError("Value '%s' is not a string" % value)
        default = settings.SORTER_DEFAULT_QUERY_NAME
        # Already the default query name, or already prefixed with it.
        if value == default or value.startswith(default):
            return value
        return '%s_%s' % (default, value)
class Sort(SorterAsTag):
    """
    {% sort queryset [with NAME] as VARIABLE %}
    {% sort object_list with "objects" as sorted_objects %}
    """
    data = ttag.Arg()
    with_ = ttag.Arg(named=True, required=False, default=settings.SORTER_DEFAULT_QUERY_NAME)

    def as_value(self, data, context):
        """Return the tag's data, ordered by the request's sort criteria if any."""
        queryset = data['data']
        criteria = self.ordering(context, data['with'])
        return queryset.order_by(*criteria) if criteria else queryset

    def ordering(self, context, name):
        """
        Given the template context and the name of the sorting
        should return a list of ordering values.
        """
        try:
            requested = context['request'].GET[name].split(',')
        except (KeyError, ValueError, TypeError):
            return []
        allowed = settings.SORTER_ALLOWED_CRITERIA.get(name)
        if allowed is None:
            return []
        # A field is kept once per allowed pattern it matches ('-' stripped
        # before matching so descending fields are treated like ascending).
        return [field
                for field in requested
                for pattern in allowed
                if fnmatch(field.lstrip('-'), pattern)]
class TemplateAsTagOptions(ttag.helpers.as_tag.AsTagOptions):
    """Tag options extended with a configurable ``template_name``."""
    def __init__(self, meta, *args, **kwargs):
        super(TemplateAsTagOptions, self).__init__(meta=meta, *args, **kwargs)
        # Read the template name from the tag's Meta; default to 'sortlink'.
        self.template_name = getattr(meta, 'template_name', 'sortlink')
class TemplateAsTagMetaclass(ttag.helpers.as_tag.AsTagMetaclass):
    """Metaclass that plugs the template-aware options class into ttag."""
    options_class = TemplateAsTagOptions
class SortURL(SorterAsTag, metaclass=TemplateAsTagMetaclass):
    """
    Parses a tag that's supposed to be in this format:
    {% sorturl [with NAME] [rel REL] [class CLASS] [as VARIABLE] by ORDER_A1[,ORDER_A2,..] [ORDER_B1[,ORDER_B2,..]] .. %}
    {% sorturl with "objects" by "creation_date,-title" %}
    """
    with_ = ttag.Arg(required=False, named=True, default=settings.SORTER_DEFAULT_QUERY_NAME)
    rel = ttag.Arg(required=False, named=True)
    class_ = ttag.Arg(required=False, named=True)
    by = ttag.MultiArg(named=True)
    class Meta:
        as_required = False
        template_name = 'sorturl'
        name = 'sorturl'
    def as_value(self, data, context):
        """Build the sorted URL and, for block tags, render the full template."""
        # The queries of the current URL, not using sequences here
        # since the order of sorting arguments matter
        url = URLObject(context['request'].get_full_path())
        queries = url.query.dict
        name, orderings = data['with'], data['by']
        # Cycle to the next ordering after the one currently in the URL.
        query = self.find_query(queries.get(name), orderings, orderings[0])
        url = url.set_query_param(name, query)
        # If this isn't a block tag we probably only want the URL
        if not self._meta.block:
            return url
        label = self.nodelist.render(context)
        if not label.strip():
            raise TemplateSyntaxError("No label was specified")
        # Build a human-readable title like "Sort by: 'title' (asc) and 'date' (desc)".
        parts = []
        for part in query.split(','):
            part = part.strip()
            if part.startswith('-'):
                part = part.lstrip('-')
                # Translators: Used in title of descending sort fields
                text = _("'%(sort_field)s' (desc)")
            else:
                # Translators: Used in title of ascending sort fields
                text = _("'%(sort_field)s' (asc)")
            parts.append(text % {'sort_field': part})
        # Translators: Used for the link/form input title excluding the sort fields
        title = (_('Sort by: %(sort_fields)s') %
                 {'sort_fields': get_text_list(parts, _('and'))})
        # NOTE(review): context.flatten() is applied last, so same-named
        # context variables shadow title/label/url/query here -- confirm
        # this precedence is intended.
        extra_context = dict(data, title=title, label=label, url=url, query=query)
        extra_context.update(context.flatten())
        return render_to_string(self.using(data), extra_context)
    def find_query(self, wanted, orderings, default):
        """
        Given the list of order statements and a query that is currently
        found in the request's querystring returns the next in line,
        or falls back to the given default.
        """
        for current, next in cycle_pairs(orderings):
            if current == wanted:
                return next
        return default
    def using(self, data):
        """
        This template tag will use 'sorter/sorturl.html' by default,
        but uses 'sorter/sorturl_NAME.html' additionally if the
        'with' argument is given.
        """
        name = data.get('with')
        template_names = [self._meta.template_name]
        if name and name != settings.SORTER_DEFAULT_QUERY_NAME:
            template_names.append(u'%s_%s' % (self._meta.template_name, name))
        return [u"sorter/%s.html" % name for name in template_names]
class Sortlink(SortURL):
    """
    Block variant of ``sorturl`` that renders an anchor around its contents.
    Parses a tag that's supposed to be in this format:
    {% sortlink [with NAME] [rel REL] [class CLASS] [as VARIABLE] by ORDER_A1[,ORDER_A2,..] [ORDER_B1[,ORDER_B2,..]] .. %}
    LABEL
    {% endsortlink %}
    {% sortlink with "objects" by "creation_date,-title" %}
    {% trans "Creation and title" %}
    {% endsortlink %}
    """
    class Meta:
        block = True
        as_required = False
        template_name = 'sortlink'
class Sortform(SortURL):
    """
    Block variant of ``sorturl`` that renders a form around its contents.
    Parses a tag that's supposed to be in this format:
    {% sortform [with NAME] [rel REL] [class CLASS] [as VARIABLE] by ORDER_A1[,ORDER_A2,..] [ORDER_B1[,ORDER_B2,..]] .. %}
    LABEL
    {% endsortform %}
    {% sortform with "objects" by "creation_date,-title" %}
    {% trans "Creation and title" %}
    {% endsortform %}
    """
    class Meta:
        block = True
        as_required = False
        template_name = 'sortform'
# Register each tag under its default (lowercased class) name.
register.tag(Sort)
register.tag(SortURL)
register.tag(Sortlink)
register.tag(Sortform)
| {
"content_hash": "4ad3c6fa6918e451fe8a2d49d45398a6",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 122,
"avg_line_length": 33.35981308411215,
"alnum_prop": 0.6069477517859644,
"repo_name": "caktus/django-sorter",
"id": "a3668b01ff8ae074258b4de181b59248740d8ccc",
"size": "7139",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "sorter/templatetags/sorter_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "19113"
}
],
"symlink_target": ""
} |
import sys

# On Python < 3.7 import the validator classes eagerly; on 3.7+ defer them
# via module-level __getattr__/__dir__ (PEP 562) so importing this package
# stays cheap until a validator is actually accessed.
if sys.version_info < (3, 7):
    from ._show import ShowValidator
    from ._pattern import PatternValidator
    from ._fill import FillValidator
    from ._count import CountValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns the lazy-loading hooks for this module.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._show.ShowValidator",
            "._pattern.PatternValidator",
            "._fill.FillValidator",
            "._count.CountValidator",
        ],
    )
| {
"content_hash": "161e038c26941ca45895b321c625c05f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 55,
"avg_line_length": 26.15,
"alnum_prop": 0.5697896749521989,
"repo_name": "plotly/python-api",
"id": "c75d5c08248422286705ae018312fc59d84bd4ca",
"size": "523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/isosurface/surface/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import os
import time
import argparse
import logging
import numpy as np
import torch
from timm.models import create_model, apply_test_time_pool
from timm.data import ImageDataset, create_loader, resolve_data_config
from timm.utils import AverageMeter, setup_default_logging
# Let cuDNN benchmark conv algorithms (fastest for fixed input shapes).
torch.backends.cudnn.benchmark = True

_logger = logging.getLogger('inference')

parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
# -- dataset / output ---------------------------------------------------
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--output_dir', metavar='DIR', default='./',
                    help='path to output files')
# -- model --------------------------------------------------------------
parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',
                    help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                    help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
# -- input pre-processing overrides ------------------------------------
parser.add_argument('--img-size', default=None, type=int,
                    metavar='N', help='Input image dimension')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=1000,
                    help='Number of classes in dataset')
# -- runtime ------------------------------------------------------------
parser.add_argument('--log-freq', default=10, type=int,
                    metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
                    help='disable test time pool')
parser.add_argument('--topk', default=5, type=int,
                    metavar='N', help='Top-k to output to CSV')
def main():
    """Run top-k inference over a folder of images and write a CSV.

    Model and data options come from the module-level argparse ``parser``.
    Writes ``topk_ids.csv`` (one row per image: filename followed by the
    top-k class ids) into ``args.output_dir``.
    """
    setup_default_logging()
    args = parser.parse_args()
    # Might as well try to do something useful: fall back to pretrained
    # weights when no checkpoint path is given.
    args.pretrained = args.pretrained or not args.checkpoint

    # Create model (optionally loading checkpoint weights).
    model = create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=3,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint)

    _logger.info('Model %s created, param count: %d' %
                 (args.model, sum([m.numel() for m in model.parameters()])))

    config = resolve_data_config(vars(args), model=model)
    model, test_time_pool = (model, False) if args.no_test_pool else apply_test_time_pool(model, config)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
    else:
        model = model.cuda()

    loader = create_loader(
        ImageDataset(args.data),
        input_size=config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=True,
        interpolation=config['interpolation'],
        mean=config['mean'],
        std=config['std'],
        num_workers=args.workers,
        # Test-time pooling expects un-cropped inputs.
        crop_pct=1.0 if test_time_pool else config['crop_pct'])

    model.eval()

    k = min(args.topk, args.num_classes)
    batch_time = AverageMeter()
    end = time.time()
    topk_ids = []
    with torch.no_grad():
        # `batch` (not `input`) to avoid shadowing the builtin.
        for batch_idx, (batch, _) in enumerate(loader):
            batch = batch.cuda()
            labels = model(batch)
            topk = labels.topk(k)[1]
            topk_ids.append(topk.cpu().numpy())

            # Measure elapsed time per batch.
            batch_time.update(time.time() - end)
            end = time.time()
            if batch_idx % args.log_freq == 0:
                _logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    batch_idx, len(loader), batch_time=batch_time))

    topk_ids = np.concatenate(topk_ids, axis=0)

    # Plain join (the original joined './topk_ids.csv', yielding 'dir/./...').
    with open(os.path.join(args.output_dir, 'topk_ids.csv'), 'w') as out_file:
        filenames = loader.dataset.filenames(basename=True)
        for filename, label in zip(filenames, topk_ids):
            out_file.write('{0},{1}\n'.format(
                filename, ','.join(str(v) for v in label)))


if __name__ == '__main__':
    main()
| {
"content_hash": "23d162a462d91c5c42f86448c3366ef2",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 137,
"avg_line_length": 41.36507936507937,
"alnum_prop": 0.6162701458173446,
"repo_name": "rwightman/pytorch-image-models",
"id": "5fcf1e60245703bb3d360ec7bc523bed06edfda4",
"size": "5235",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "inference.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2368284"
},
{
"name": "Shell",
"bytes": "108"
}
],
"symlink_target": ""
} |
from .base import Strategy
class RarityStrategy(Strategy):
    """Rarest-first piece selection: request the least-common pieces first."""
    def check_rarity(self, length):
        """
        Count, for each piece index, how many peers advertise that piece.
        :param int length: number of pieces to score.
        :return list: dicts with 'rarity' (peer count), 'index' and 'peers'.
        """
        rarity = [{'rarity': 0, 'index': i, 'peers': []} for i in range(length)]
        for peer in self.pieces:
            for index, item in enumerate(rarity):
                item['rarity'] += int(self.pieces[peer]['bitfield'][index])
                # NOTE(review): every peer is appended regardless of whether
                # its bitfield actually has this piece -- confirm intended.
                item['peers'].append(self.pieces[peer]['peer'])
        return rarity
    def build(self):
        """Sort the (hard-coded 10) pieces by rarity and request them all."""
        rarity = sorted(self.check_rarity(10),
                        key=lambda piece: piece['rarity'])
        self.start_requesting(rarity)
    def start_requesting(self, rarity):
        """Split each piece's blocks evenly across its peers and request them."""
        # piece / 16K
        BLOCK_SIZE = 2 ** 14
        # NOTE(review): `/` is true division on Python 3, so these block/byte
        # counts are floats; under Python 2 they were ints, which changes how
        # many requests the while-loop below issues. Confirm intended target.
        piece_blocks = (self.torrent['info']['length'] / 10) / BLOCK_SIZE
        for piece in rarity:
            blocks_per_peer = piece_blocks / len(piece['peers'])
            offset = piece['index'] * (self.torrent['info']['length'] / 10)
            for index, peer in enumerate(piece['peers']):
                # Byte range [start, end) assigned to this peer.
                start = offset + index * blocks_per_peer * BLOCK_SIZE
                end = offset + (index + 1) * blocks_per_peer * BLOCK_SIZE
                block_index = 0
                while start < end:
                    peer.request(piece['index'], block_index, BLOCK_SIZE)
                    start += BLOCK_SIZE
                    block_index += 1
| {
"content_hash": "7fb85f96efad198f27590fa2f19a2b4e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 76,
"avg_line_length": 35.26470588235294,
"alnum_prop": 0.5988323603002502,
"repo_name": "vtemian/university_projects",
"id": "cf94c7dcb49b4914fb1335f24d7a2c82a6a31a88",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_structures/bitorrent/client/strategies/rarity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15965"
},
{
"name": "C++",
"bytes": "9417"
},
{
"name": "Python",
"bytes": "69909"
}
],
"symlink_target": ""
} |
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
import string
import requests
import random
WORD_GAME_TOPICS = [
"3-Letter Words", "4-Letter Words", "5-Letter Words", "A Baseball Player's Name",
"A bird", "A boy's name", "A drink",
"A fish", "A Football Player's Name", "A girl's name",
"A relative", "A river", "Abbreviations",
"Acronyms", "Action Figures", "Action Words",
"Actors", "Actresses", "Adjectives",
"African Animals", "African Countries", "After-School Activities",
"Airlines", "Alcoholic Drinks", "Amphibians",
"An animal", "Animal Homes", "Animal noises",
"Animals Found in Foreign Lands", "Animals in books or movies", "Animals That Advertise Products",
"Animals That Are a Certain Color", "Animals That Fly", "Animals That Hop or Jump",
"Animals That Live Underground", "Animals That Swim", "Animals",
"Any Green Food or Drink", "Appliances ", "Appliances",
"Arctic Animals", "Areas of Mathematics Study", "Areas of Study",
"Articles of Clothing", "Artists", "Asian Animals",
"Asian Capital Cities", "Asian Countries", "At The Zoo",
"Athletes Who Do Commercials", "Athletes", "Australian/New Zealand Animals",
"Authors", "Automobiles", "Awards/ceremonies",
"Baby Clothes", "Baby foods", "Bad habits",
"Bands with One-word Names", "Bathroom Accessories", "Beers",
"Beverages", "Birds", "Blockbuster Movies",
"Board games", "Bodies of water", "Bones of the Body",
"Book Titles", "Books,Movies,or TV Shows about About Sports", "Boy Bands",
"Breakfast Cereals", "Breakfast foods", "Building Toys",
"Canadian Provinces", "Cancelled TV Shows", "Candy",
"Canned Food", "Capitals", "Car Parts",
"Card games", "Carpentry Tools", "Cars",
"Cartoon characters", "Cat Breeds", "Celebrations Where Gifts Are Given",
"Celebrities You'd Like to Meet", "Celebrities", "Chemicals",
"Children's books", "Children's Games", "Children's Songs",
"Children's TV Shows", "Childrens Books", "Chinese Food",
"Christmas Carols", "Christmas songs", "Cities",
"Classic Commercials", "Classic Movies", "Classic Toys",
"Classical Music", "Clothing Worn by Cowboys", "Clothing",
"Cocktails", "Cold Climates", "Cold Drinks",
"Cold Places", "College Majors", "Colleges/Universities",
"Colors", "Comedies", "Comedy Shows",
"Companies", "Compound Nouns Formed With 'Life'", "Compound Nouns Formed With 'Light' (Flashlight,Spotlight,etc.)",
"Compound Nouns Formed With 'Time'", "Computer parts", "Computer programs",
"Condiments", "Constellations", "Contractions",
"Cooking Shows", "Cooking utensils", "Cosmetics/Toiletries",
"Countries", "Country Flags", "Country Names Beginning With a Particular Letter",
"Couples", "Crimes", "Cruises",
"Dairy Products", "Dangerous Animals", "Daytime TV Shows",
"Desk Accessories", "Desserts", "Diet foods",
"Diseases", "Disgusting Things to Eat or Drink ", "Disney Movies",
"Dog Breeds", "Dolls", "Drugs that are abused",
"Eighties Music", "Electronic gadgets", "Entertainment",
"Equipment", "Ethnic foods", "European Animals",
"European Capital Cities", "European Countries", "Excuses for being late",
"Famous Artists", "Famous Characters", "Famous Children",
"Famous duos and trios", "Famous Females", "Famous Paintings",
"Famous Players", "Fantasy", "Farm Animals",
"Fast Animals", "Fast Food Restaurant Names", "Fast-Food",
"Fears", "Female Athletes", "Female Singers",
"Female Stars", "Fictional characters", "Fictitious Places",
"Fish", "Floor Coverings", "Flowers",
"Folk Songs", "Food at a Carnival or Fair", "Food Found in a Casserole",
"Food Found In a Deli ", "Food You Eat Raw", "Food/Drink that is green",
"Foods you eat raw", "Footware", "Footwear",
"Foreign Cities", "Foreign words used in English", "Foreign Words",
"Foriegn cities", "Found in a Salad Bar", "Four-Legged Animals",
"Fried Foods", "From TV,Movies,or Books ", "Fruits",
"Furniture by Room (i.e. bedroom,kitchen,etc.)", "Furniture in This Room", "Furniture You Sit On (or At)",
"Furniture", "Game terms", "Games",
"Gardening Tasks", "Gems", "Gifts for the Bride & Groom",
"Gifts", "Gifts/Presents", "Gourmet Foods",
"Halloween costumes", "Health Food", "Heroes",
"Historic events", "Historical Figures", "Hobbies",
"Holiday Activities ", "Holiday Activities", "Holiday Songs",
"Holidays", "Honeymoon spots", "Horror Movies",
"Hors D'oeuvres", "Hot Drinks", "Hot Places",
"Household chores", "Ice cream flavors", "In Europe",
"In National Geographic Magazine", "In North America", "In the NWT (Northwest Territories,Canada)",
"In Your Hometown", "Insects", "Internal Organs",
"Internet lingo", "Internet", "iPhone Apps",
"Islands", "Italian Food", "Items in a catalog",
"Items in a kitchen", "Items in a Refrigerator", "Items in a suitcase",
"Items in a vending machine", "Items in this room", "Items in Your Purse/Wallet",
"Items you save up to buy", "Items you take on a road trip", "Items You Take On A Trip",
"Junk Food", "Kinds of candy", "Kinds of Dances",
"Kinds of soup", "Kitchen Appliances", "Lakes",
"Languages", "Last Names", "Legal Terms",
"Leisure activities", "Long-Running TV Series", "Love Songs",
"Love Stories", "Low Calorie Foods", "Magazines",
"Male Singers", "Male Stars", "Mammals",
"Mascots", "Math Functions", "Math terms",
"Mechanic's Tools", "Medical Terms", "Medicine Names",
"Medicine/Drugs", "Men's Clothing", "Menu items",
"Metals", "Mexican Food", "Mexican Foods",
"Military Leaders", "Minerals", "Models",
"Mountain Ranges", "Movie Stars (Dead)", "Movie Stars (Living)",
"Movie Theme Songs", "Movie titles", "Movies on TV",
"Music Programs", "Musical groups", "Musical Instruments",
"Mythological Characters", "Names used in songs", "Names Used in the Bible",
"Nationalities", "Newscasters/Journalists", "Nickelodeon",
"Nicknames", "Nineties Music", "Nintendo",
"North/South American Animals", "North/South American Countries", "Not On Planet Earth",
"Notorious people", "Nouns", "Nursery Rhymes",
"Nursing Terms", "Occupations", "Ocean things",
"Oceans", "Offensive words", "Office Items",
"Office Tools", "Olympic events", "On a Wine List",
"Parks", "Parts of Speech", "Parts of the body",
"People in Uniform", "People Who Do Dangerous Jobs", "People Who Do Door To Door",
"People Who Work Alone", "People Who Work at Night", "People You Admire",
"People You Aviod", "People's Names Used in Songs", "Periodic Table Elements",
"Personality traits", "Pets", "Photography",
"Pizza toppings", "Places in Europe", "Places To Hang Out",
"Places to hangout", "Places You Wouldn't Want to live", "Played Inside",
"Played Outside", "Plumbing Tools", "Political Figures",
"Possessive Pronouns", "Presidents", "Prime Time TV",
"Pro Sports Teams", "Produce", "Product Names",
"Pronouns", "Provinces or States", "Punctuation",
"Rappers", "Reality TV", "Reasons to be Absent",
"Reasons to call 911", "Reasons to Go to the Principal's Office", "Reasons to make a phone call",
"Reasons to quit your job", "Reasons to take out a loan", "Reference Books",
"Reptiles", "Reptiles/Amphibians", "Restaurants",
"Rivers", "Road Signs", "Sales Terms",
"Sandwiches", "School subjects", "School supplies",
"Science Fiction", "Science Terms", "Scientific Disciplines",
"Seafood", "Seas", "Seventies Music",
"Sex Symbols", "Shows You Don't Like", "Singers",
"Sit Coms", "Sixties Music", "Slow Animals",
"Snacks", "Soft Drinks", "Software",
"Someone From Your Past", "Something you keep hidden", "Something you're afraid of",
"Song titles", "Songs with a Name in the Title", "South American Countries",
"Spices", "Spices/Herbs", "Spicy foods",
"Sporting Events", "Sports equipment", "Sports equiptment",
"Sports Mascots", "Sports Personalities", "Sports Played Indoors",
"Sports Played Inside", "Sports played outdoors", "Sports Played Outside",
"Sports Stars", "Sports Teams", "Sports Terms",
"Sports", "Stars Who Appear in Both TV & Movies", "States",
"Stones/Gems", "Store names", "Street Names",
"Styles of Shoes", "Summer Olympics Sports", "Superlative Adjectives",
"T.V. Show Theme Songs", "T.V. Shows", "Teaching Tasks",
"Teaching Terms", "Team Names", "Television stars",
"Terms of endearment", "Terms of Measurement", "Terms Referring to rain,snow,etc.",
"Terms", "Theme Songs", "Things Animals Eat",
"Things Associated with Autumn", "Things Associated with Spring", "Things Associated with Summer",
"Things Associated with Winter", "Things at a carnival", "Things at a circus",
"Things at a football game", "Things found at a bar", "Things Found in a Basement Cellar",
"Things found in a desk", "Things found in a hospital", "Things Found in a Locker",
"Things Found in a Park", "Things found in New York", "Things Found in the Cafeteria",
"Things Found in the Water", "Things Found On a Map", "Things From a Stationary Store",
"Things in a Classroom", "Things in a grocery store", "Things in a medicine cabinet",
"Things in a park", "Things in a Souvenir Shop", "Things in the kitchen",
"Things in the sky", "Things Made of Metal", "Things On a Beach",
"Things Sold in Commercials", "Things that are black", "Things that are cold",
"Things that Are Flat (Coin,Paper,Floor,Etc.)", "Things that are Found in the Ocean", "Things that are hot",
"Things that Are in a Medicine Cabinet", "Things that Are in a Park", "Things that Are in the Sky",
"Things That Are Loud", "Things that Are Made of Glass", "Things that Are Made of Plastic",
"Things that Are Made of Wood", "Things that Are Naturally Round", "Things that Are Naturally Yellow,Blue,Red,Etc.",
"Things That Are Red", "Things that are round", "Things that are square",
"Things that are sticky", "Things that Are Terrifying", "Things That Are White",
"Things that Burn", "Things that can get you fired", "Things that can kill you",
"Things that Cost a Lot", "Things that Do Not Break When Dropped", "Things That Feel Hot",
"Things That Feel Soft", "Things that Found at a Circus", "Things that grow",
"Things that have buttons", "Things that have spots", "Things that have stripes",
"Things that have wheels", "Things that Have Wings", "Things that Jump or Bounce",
"Things that jump/bounce", "Things that Make You Itch", "Things that make you smile",
"Things that People Lose", "Things that Smell Bad", "Things that Smell Good",
"Things That Taste Spicy", "Things that use a remote", "Things that You Wear",
"Things to do at a party", "Things to do on a date", "Things With Stripes",
"Things with tails", "Things Worn From the Waist Down", "Things Worn From the Waist Up",
"Things you buy for kids", "Things You Can See", "Things You Carry",
"Things you do at work", "Things You Do Every Day", "Things you do everyday",
"Things You Do in Gym Class", "Things You Do in Study Hall", "Things You Do While Watching TV",
"Things You Don't Want to Hear", "Things you get in the mail", "Things you get tickets for",
"Things you make", "Things You Need Tickets To See", "Things You Never Tasted",
"Things You Plug in", "Things you replace", "Things you save up to buy",
"Things You Scream at Officials", "Things you see at the zoo", "Things You See in a City",
"Things you shouldn't touch", "Things you shout", "Things You Sit In/on",
"Things you store items in", "Things You Study in Geography", "Things You Study in History",
"Things you throw away", "Things you wear", "Things you're allergic to",
"Titles people can have", "Tools", "Tourist attractions",
"Toys", "Train Travel Destinations", "Trees",
"Tropical Locations", "TV Character Names", "TV Shows",
"TV Stars", "Types of Art (i.e. Fine,Abstract,etc.)", "Types of Cheese",
"Types of Drink", "Types of drinks", "Types of Meat",
"Types of Rocks", "Types of Toys", "Types of weather",
"U.S. Cities", "Under Garments", "United States Capitals",
"Units of Measure", "Vacation spots", "Vegetable Garden Plants",
"Vegetables", "Vehicles", "Video games",
"Villains", "Villains/Monsters", "Villians",
"Wall Coverings", "Warm Climates", "Water Sports",
"Ways to get from here to there", "Ways to kill time", "Weapons",
"Weather", "Websites", "Weekend Activities",
"Window Coverings", "Winter Olympics Sports", "Wireless things",
"With A High Altitude", "Women's Clothing", "Words associated with exercise",
"Words associated with money", "Words associated with winter", "Words Beginning With a Particular Letter",
"Words Beginning With the Prefix '-Mis'", "Words Beginning With the Prefix '-Un'", "Words Ending in '-ed'",
"Words Ending in '-ly'", "Words ending in '-n'", "Words Said In Anger",
"Words That Can Be Used as Conjunctions", "Words that End in '-ing'", "Words with a Double Letter",
"Words with double letters", "World Leaders/Politicians", "World Records",
]
class WordGamePlugin(WillPlugin):
    """Chat plugin that starts a Scattergories-style word game round."""

    # BUG FIX: the original pattern "(\!\.)?$" only matched the literal
    # two-character suffix "!." -- an optional trailing "!" or "." was
    # clearly intended, which "[!.]?" expresses.
    @respond_to("^(play a word game|scattegories)[!.]?$")
    def word_game_round(self, message):
        "play a word game: Play a game where you think of words that start with a letter and fit a topic."
        # The letter every answer must start with.
        letter = random.choice(string.ascii_uppercase)
        # BUG FIX: the original uniqueness check compared a topic string
        # against a list of dicts ("new_topic not in topics"), which was
        # always True, so duplicate topics could be chosen.
        # random.sample guarantees ten distinct topics in one call.
        topics = [
            {"index": i, "topic": topic}
            for i, topic in enumerate(random.sample(WORD_GAME_TOPICS, 10),
                                      start=1)
        ]
        context = {
            "letter": letter,
            "topics": topics,
        }
        self.say(rendered_template("word_game.html", context), message=message)
| {
"content_hash": "b9a49f95dca68f32f466b604f36683ed",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 120,
"avg_line_length": 62.42920353982301,
"alnum_prop": 0.6659578992132681,
"repo_name": "woohgit/will",
"id": "4eedad805ac7c501a6bd00440ae307c80a970ec7",
"size": "14109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "will/plugins/fun/wordgame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2008"
},
{
"name": "Python",
"bytes": "312884"
},
{
"name": "Shell",
"bytes": "1940"
}
],
"symlink_target": ""
} |
import time
import typing
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.types import WaitExcTypes
POLL_FREQUENCY: float = 0.5 # How long to sleep in between calls to the method
IGNORED_EXCEPTIONS: typing.Tuple[typing.Type[Exception]] = (NoSuchElementException,) # default to be ignored.
class WebDriverWait:
    # Polls a condition callable until it succeeds, raising TimeoutException
    # once the deadline passes.

    def __init__(
        self,
        driver,
        timeout: float,
        poll_frequency: float = POLL_FREQUENCY,
        ignored_exceptions: typing.Optional[WaitExcTypes] = None,
    ):
        """Constructor, takes a WebDriver instance and timeout in seconds.

        :Args:
         - driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
         - timeout - Number of seconds before timing out
         - poll_frequency - sleep interval between calls
           By default, it is 0.5 second.
         - ignored_exceptions - iterable structure of exception classes ignored during calls.
           By default, it contains NoSuchElementException only.

        Example::

         from selenium.webdriver.support.wait import WebDriverWait \n
         element = WebDriverWait(driver, 10).until(lambda x: x.find_element(By.ID, "someId")) \n
         is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\\ \n
                     until_not(lambda x: x.find_element(By.ID, "someId").is_displayed())
        """
        self._driver = driver
        self._timeout = float(timeout)
        self._poll = poll_frequency
        # avoid the divide by zero
        if self._poll == 0:
            self._poll = POLL_FREQUENCY
        # Merge the module default with any caller-supplied exceptions; a
        # single (non-iterable) exception class is accepted as well.
        exceptions = list(IGNORED_EXCEPTIONS)
        if ignored_exceptions:
            try:
                exceptions.extend(iter(ignored_exceptions))
            except TypeError:  # ignored_exceptions is not iterable
                exceptions.append(ignored_exceptions)
        self._ignored_exceptions = tuple(exceptions)

    def __repr__(self) -> str:
        # e.g. <selenium.webdriver.support.wait.WebDriverWait (session="abc123")>
        return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(type(self), self._driver.session_id)

    def until(self, method, message: str = ""):
        """Calls the method provided with the driver as an argument until the \
        return value does not evaluate to ``False``.

        :param method: callable(WebDriver)
        :param message: optional message for :exc:`TimeoutException`
        :returns: the result of the last call to `method`
        :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs
        """
        screen = None
        stacktrace = None
        # monotonic clock so wall-clock adjustments cannot shorten or
        # lengthen the wait
        end_time = time.monotonic() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if value:
                    return value
            except self._ignored_exceptions as exc:
                # keep diagnostics from the most recent ignored failure for
                # the TimeoutException raised below
                screen = getattr(exc, "screen", None)
                stacktrace = getattr(exc, "stacktrace", None)
            # sleep before the deadline check, so the condition is always
            # evaluated at least once after the final poll interval
            time.sleep(self._poll)
            if time.monotonic() > end_time:
                break
        raise TimeoutException(message, screen, stacktrace)

    def until_not(self, method, message: str = ""):
        """Calls the method provided with the driver as an argument until the \
        return value evaluates to ``False``.

        :param method: callable(WebDriver)
        :param message: optional message for :exc:`TimeoutException`
        :returns: the result of the last call to `method`, or
                  ``True`` if `method` has raised one of the ignored exceptions
        :raises: :exc:`selenium.common.exceptions.TimeoutException` if timeout occurs
        """
        end_time = time.monotonic() + self._timeout
        while True:
            try:
                value = method(self._driver)
                if not value:
                    return value
            except self._ignored_exceptions:
                # an ignored exception counts as "condition no longer holds"
                return True
            time.sleep(self._poll)
            if time.monotonic() > end_time:
                break
        raise TimeoutException(message)
| {
"content_hash": "7beb0a2dcbe21735d037f8a289a6d3e0",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 110,
"avg_line_length": 40.495049504950494,
"alnum_prop": 0.6073349633251833,
"repo_name": "valfirst/selenium",
"id": "12d9990aa7cdbf4b8bd9b95bbbb491dfe609f1d8",
"size": "4878",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "py/selenium/webdriver/support/wait.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "4443"
},
{
"name": "C",
"bytes": "82917"
},
{
"name": "C#",
"bytes": "2990022"
},
{
"name": "C++",
"bytes": "2285448"
},
{
"name": "CSS",
"bytes": "1049"
},
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "HTML",
"bytes": "1379853"
},
{
"name": "Java",
"bytes": "6286458"
},
{
"name": "JavaScript",
"bytes": "2535395"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "988077"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "1036679"
},
{
"name": "Rust",
"bytes": "45287"
},
{
"name": "Shell",
"bytes": "29804"
},
{
"name": "Starlark",
"bytes": "401750"
},
{
"name": "TypeScript",
"bytes": "126843"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.prepayment_balance_list_response import PrepaymentBalanceListResponse
class TestPrepaymentBalanceListResponse(unittest.TestCase):
    """Unit test stubs for the PrepaymentBalanceListResponse model."""

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testPrepaymentBalanceListResponse(self):
        """Smoke-test: construct a PrepaymentBalanceListResponse instance."""
        model = kinow_client.models.prepayment_balance_list_response.PrepaymentBalanceListResponse()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "aea047fc43347bd43d26dd12d80e0162",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 100,
"avg_line_length": 21.75,
"alnum_prop": 0.7091954022988506,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "3de50ea6681a962d9d2549c174b0ee76aa2c7e68",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_prepayment_balance_list_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Plan.priority with a Plan.queue choice field."""

    dependencies = [("plan", "0030_plan_priority")]

    operations = [
        # Drop the old priority field...
        migrations.RemoveField(model_name="plan", name="priority"),
        # ...and add a queue name restricted to three choices.
        migrations.AddField(
            model_name="plan",
            name="queue",
            field=models.CharField(
                choices=[
                    ("default", "default"),
                    ("medium", "medium priority"),
                    ("high", "high priority"),
                ],
                default="default",
                max_length=16,
            ),
        ),
    ]
| {
"content_hash": "0d20973cd3cc4d1a07fbede4398bb4b1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 27.391304347826086,
"alnum_prop": 0.4714285714285714,
"repo_name": "SalesforceFoundation/mrbelvedereci",
"id": "61a2a4fa85a5c0115dfd711b9aa119a5937c4e9d",
"size": "679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metaci/plan/migrations/0031_plan_queue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2069"
},
{
"name": "HTML",
"bytes": "123214"
},
{
"name": "JavaScript",
"bytes": "3993"
},
{
"name": "Python",
"bytes": "245560"
},
{
"name": "Shell",
"bytes": "4590"
}
],
"symlink_target": ""
} |
"""
The ScikitLearn folder includes different types of Scikitlearn GuassianProcess based ROMs
that are available via RAVEN
"""
| {
"content_hash": "7640f1addb0dbcb4ef344fc83b458d4a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 32.75,
"alnum_prop": 0.7862595419847328,
"repo_name": "joshua-cogliati-inl/raven",
"id": "7a03bb39b52fcf2ff46f3ee2698a6d91d96cb0e8",
"size": "720",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "ravenframework/SupervisedLearning/ScikitLearn/GaussianProcess/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556080"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "6952659"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8574"
},
{
"name": "Shell",
"bytes": "124279"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
https://github.com/robertchase/spindrift/blob/master/LICENSE.txt
'''
from importlib import import_module
import logging
import logging.config
import platform
import signal
from ergaleia.normalize_path import normalize_path
from spindrift.database.db import DB
from spindrift.micro_fsm.handler import InboundHandler, MysqlHandler
from spindrift.micro_fsm.parser import Parser as parser
from spindrift.rest.handler import RESTContext
from spindrift.rest.mapper import RESTMapper, RESTMethod
from spindrift.network import Network
from spindrift.timer import Timer
import spindrift.micro_fsm.connect as micro_connection
log = logging.getLogger(__name__)
def trace(state, event, is_default, is_internal):
    """Parser trace callback: debug-log each fsm state/event transition.

    ``is_default`` and ``is_internal`` are accepted to satisfy the trace
    callback signature but are not logged here.
    """
    # Lazy %-style args: the message is only formatted when DEBUG is enabled.
    log.debug('parser s=%s, e=%s', state, event)
class Micro(object):
    """Holds the network, timer and connection registry for one service.

    Typical lifecycle: ``load()`` -> ``setup()`` -> ``run()``.
    """

    def __init__(self):
        self.network = Network()
        self.timer = Timer()
        # anonymous namespace object; setup_connections attaches one
        # attribute per defined connection
        self.connection = type('Connections', (object,), dict())

    def load(self, micro='micro', config=None, is_trace=False):
        """Parse the micro description file (and optional config file).

        :returns: self, for chaining
        """
        self.parser = parser.parse(micro, trace if is_trace else None)
        if config:
            self.parser.config._load(config)
        return self

    def setup(self):
        """Configure logging, database, servers and connections.

        :returns: self, for chaining
        """
        parser = self.parser
        config = parser.config
        setup_log(config)
        setup_database(config, self)
        setup_servers(config, self, parser.servers)
        setup_connections(config, self, parser.connections)
        return self

    def run(self):
        """Run the service loop until shutdown, then tear down and close."""
        # grab the teardown hook before the parser is discarded below
        teardown = self.parser.teardown
        start(self, self.parser.setup)
        del self.__dict__['parser']  # parser not available during run
        run(self)
        stop(teardown)
        self.close()

    def close(self):
        # close all listeners/connections owned by the network
        self.network.close()
micro = Micro()
def db_cursor(rest_handler):
    """Decorator: add a database cursor to a request.

    The cursor is added to the request as the attribute 'cursor'
    and set to automatically close on request.respond. The
    delay() method is called on the request object to allow
    async calls to continue without a premature response.
    """
    from functools import wraps

    # IMPROVEMENT: wraps preserves the handler's __name__/__doc__ for
    # introspection and logging (the original exposed 'inner').
    @wraps(rest_handler)
    def inner(request, *args, **kwargs):
        cursor = micro.db.cursor
        # tag the cursor with the request id for log correlation
        cursor.cid = request.id
        request.cursor = cursor
        # close the cursor automatically when the request responds
        request.cleanup = cursor.close
        request.delay()
        rest_handler(request, *args, **kwargs)
    return inner
class MicroContext(RESTContext):
    """REST context carrying the HTTP parsing limits configured for a server."""

    def __init__(self, mapper, http_max_content_length, http_max_line_length,
                 http_max_header_count):
        super(MicroContext, self).__init__(mapper)
        # per-server HTTP parsing limits, copied onto each handler instance
        self.http_max_content_length = http_max_content_length
        self.http_max_line_length = http_max_line_length
        self.http_max_header_count = http_max_header_count
class MicroHandler(InboundHandler):
    """Default inbound REST handler; applies the context's HTTP limits."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(InboundHandler, self).__init__,
        # which skips InboundHandler's own __init__ in the MRO (compare the
        # correct pattern in MicroContext above).
        super(MicroHandler, self).__init__(*args, **kwargs)
        context = self.context
        # copy the parsing limits from the shared context onto this handler
        self.http_max_content_length = context.http_max_content_length
        self.http_max_line_length = context.http_max_line_length
        self.http_max_header_count = context.http_max_header_count

    def on_rest_exception(self, exception_type, value, trace):
        """Log unexpected rest-handler exceptions with traceback."""
        log.exception('rest handler exception')
def _import(item_path, is_module=False):
if is_module:
return import_module(item_path)
path, function = item_path.rsplit('.', 1)
module = import_module(path)
return getattr(module, function)
def _load(path):
    """Parse the micro description file at *path* (normalized as a 'micro' file)."""
    return parser.parse(normalize_path(path, filetype='micro'))
def setup_signal():
    """Install a SIGUSR1 handler that toggles the root logger INFO <-> DEBUG."""
    def toggle_debug(signum, frame):
        # flip between INFO and DEBUG on each signal delivery
        root = logging.getLogger()
        if root.getEffectiveLevel() == logging.INFO:
            level, name = logging.DEBUG, 'DEBUG'
        else:
            level, name = logging.INFO, 'INFO'
        root.setLevel(level)
        log.info('log level set to %s', name)
    signal.signal(signal.SIGUSR1, toggle_debug)
def setup_log(config):
    """Configure process logging from config.log via dictConfig.

    Output goes to stdout when config.log.is_stdout is set, otherwise to
    syslog. Also installs the SIGUSR1 level-toggle handler.
    """
    config = config.log
    name = config.name
    level = config.level.upper()
    stdout = config.is_stdout
    conf = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'datefmt': '%Y-%m-%d %H:%M:%S',
            },
            # 'format' keys for both formatters are filled in below so the
            # service name can be embedded
            'syslog': {
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'standard',
            },
            'syslog': {
                'class': 'logging.handlers.SysLogHandler',
                'address': '/dev/log',
                'formatter': 'syslog',
            },
        },
        'loggers': {
            # root logger routes through whichever handler becomes 'default'
            '': {
                'handlers': ['default'],
                'level': level,
                'propagate': True,
            },
        },
    }
    body = ' [%(levelname)s] %(name)s:%(lineno)d> %(message)s'
    conf['formatters']['standard']['format'] = '%(asctime)s ' + name + body
    conf['formatters']['syslog']['format'] = name + body
    if platform.system() == 'Darwin':
        # macOS exposes the syslog socket at a different path
        conf['handlers']['syslog']['address'] = '/var/run/syslog'
    if stdout:
        conf['handlers']['default'] = conf['handlers']['console']
        del conf['handlers']['syslog']
    else:
        conf['handlers']['default'] = conf['handlers']['syslog']
    logging.config.dictConfig(conf)
    log.info('log level set to %s', level)
    setup_signal()
def _fsm_trace(s, e, d, i):
    """Debug-trace callback for the mysql fsm.

    Presumably mirrors trace()'s (state, event, is_default, is_internal)
    signature -- confirm against the fsm caller; ``d`` is not logged.
    """
    log.debug('mysql fsm s=%s, e=%s, is_internal=%s', s, e, i)
def setup_database(config, micro):
    """Attach a DB instance to *micro* when config defines an active db section."""
    try:
        db = config.db
    except Exception:
        # config has no db section (attribute access raised); nothing to do
        return
    if db.is_active:
        micro.db = DB(
            micro.network,
            user=db.user,
            pswd=db.password,
            db=db.database,
            host=db.host,
            port=db.port,
            isolation=db.isolation,
            handler=MysqlHandler,
            fsm_trace=_fsm_trace if db.fsm_trace else None,
        )
        # share the service timer so query timeout/long-query tracking can
        # be scheduled alongside everything else
        context = micro.db.context
        context.timer = micro.timer
        context.timeout = db.timeout
        context.long_query = db.long_query
def setup_servers(config, micro, servers):
    """Create one listening REST server per active server definition.

    Each server gets a RESTMapper populated from its routes/methods and a
    MicroContext carrying the configured HTTP parsing limits.
    """
    for server in servers.values():
        conf = config._get('server.%s' % server.name)
        if conf.is_active is False:
            continue
        mapper = RESTMapper()
        context = MicroContext(
            mapper,
            conf.http_max_content_length,
            conf.http_max_line_length,
            conf.http_max_header_count,
        )
        # IMPROVEMENT: the original enumerated routes into an unused
        # 'routenum' variable; iterate directly.
        for route in server.routes:
            methods = {}
            for name, defn in route.methods.items():
                try:
                    method = RESTMethod(defn.path)
                    for arg in route.args:
                        method.add_arg(arg.type)
                    for arg in defn.content:
                        method.add_content(arg.name, arg.type, arg.is_required)
                    methods[name] = method
                except Exception as e:
                    raise Exception(
                        'error setting up server: {}'.format(str(e))
                    )
            mapper.add(route.pattern, methods)
        try:
            handler = _import(conf.handler, is_module=True)
        except KeyError:
            # no handler configured -- fall back to the default
            handler = MicroHandler
        micro.network.add_server(
            port=conf.port,
            handler=handler,
            context=context,
            is_ssl=conf.ssl.is_active,
            ssl_certfile=conf.ssl.certfile,
            ssl_keyfile=conf.ssl.keyfile,
        )
        log.info('listening on %s port %d', server.name, conf.port)
def setup_connections(config, micro, connections):
    """Build one MicroConnect per defined outbound connection and attach it
    to ``micro.connection.<name>``.

    Header and resource values come either from config (``header.config``)
    or from imported code (``header.code``).
    """
    for c in connections.values():
        conf = config._get('connection.%s' % c.name)
        headers = {}
        for header in c.headers.values():
            if header.config:
                value = config._get(
                    'connection.%s.header.%s' % (c.name, header.config)
                )
            else:
                value = _import(header.code)
            # falsy values (unset config, empty result) are skipped entirely
            if value:
                headers[header.key] = value
        conn = micro_connection.MicroConnect(
            c.name,
            micro.network,
            micro.timer,
            # NOTE(review): reads conf.url but tests c.url -- presumably the
            # config value overlays the parsed one; confirm.
            conf.url if c.url is not None else _import(c.code),
            headers,
            c.is_json,
            conf.is_verbose,
            conf.timeout,
            _import(c.handler) if c.handler else None,
            _import(c.wrapper) if c.wrapper else None,
            _import(c.setup) if c.setup else None,
            c.is_form,
        )
        for resource in c.resources.values():
            optional = {}
            for option in resource.optional.values():
                if option.config:
                    optional[option.name] = config._get(
                        'connection.%s.resource.%s.%s' % (
                            c.name, resource.name, option.config)
                    )
                else:
                    optional[option.name] = option.default
            if resource.headers is not None:
                # NOTE(review): entries of resource.headers are rewritten
                # while iterating .values(); if header.key differs from the
                # dict's existing keys this inserts during iteration --
                # confirm this cannot happen in practice.
                for header in resource.headers.values():
                    if header.config:
                        resource.headers[header.key] = config._get(
                            'connection.%s.resource.%s.header.%s' % (
                                c.name,
                                resource.name,
                                header.config)
                        )
                    elif header.code:
                        resource.headers[header.key] = _import(header.code)
                    else:
                        resource.headers[header.key] = header.default
            conn.add_resource(
                resource.name,
                resource.path,
                resource.method,
                resource.headers,
                resource.is_json,
                resource.is_verbose,
                resource.trace,
                resource.timeout,
                _import(resource.handler) if resource.handler else None,
                _import(resource.wrapper) if resource.wrapper else None,
                _import(resource.setup) if resource.setup else None,
                resource.is_form,
                resource.required,
                optional,
            )
        setattr(micro.connection, c.name, conn)
def start(micro, setup):
    """Run the configured setup hook, if any, passing it the micro config.

    NOTE(review): Micro defines no ``config`` attribute in this file; if a
    setup hook is configured this access looks like it would fail -- confirm.
    """
    if not setup:
        return
    _import(setup)(micro.config)
def run(micro, sleep=100, max_iterations=100):
    """Service the network and timer until interrupted.

    *sleep* is the per-iteration network timeout in milliseconds.
    KeyboardInterrupt exits the loop; any other exception is logged and
    the loop continues.
    """
    timeout = sleep / 1000.0
    while True:
        try:
            micro.network.service(timeout=timeout,
                                  max_iterations=max_iterations)
            micro.timer.service()
        except KeyboardInterrupt:
            log.info('Received shutdown command from keyboard')
            return
        except Exception:
            log.exception('exception encountered')
def stop(teardown):
    """Run the configured teardown hook, if any."""
    if not teardown:
        return
    _import(teardown)()
if __name__ == '__main__':
    # Command-line entry point: parse arguments, load the micro/config
    # files, then either display the parsed results or run the service.
    import argparse
    import os
    import spindrift.micro as module

    logging.basicConfig(level=logging.DEBUG)
    aparser = argparse.ArgumentParser(
        description='start a micro service',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    aparser.add_argument(
        '--config', help='configuration file (default=config)'
    )
    aparser.add_argument(
        '--no-config', dest='no_config', default=False, action='store_true',
        help="don't use a config file"
    )
    aparser.add_argument(
        '--micro', default='micro', help='micro description file'
    )
    aparser.add_argument(
        '-c', '--config-only',
        dest='config_only', action='store_true', default=False,
        help='parse micro and config files and display config values'
    )
    aparser.add_argument(
        '-n', '--connections',
        dest='connections_only', action='store_true', default=False,
        help='parse micro and config files and display defined connections'
    )
    aparser.add_argument(
        '-t', '--trace',
        dest='trace', action='store_true', default=False,
        help='log parser fsm events'
    )
    args = aparser.parse_args()
    micro = args.micro
    # choose the config file: --no-config wins, then --config, then a file
    # literally named 'config' in the working directory, else none
    if args.no_config:
        config = None
    elif args.config is None:
        if os.path.isfile('config'):
            config = 'config'
        else:
            config = None
    else:
        config = args.config
    m = module.micro.load(micro, config, is_trace=args.trace)
    if args.config_only:
        print(m.parser.config)
    elif args.connections_only:
        print(m.parser.show_connections())
    else:
        m.setup().run()
| {
"content_hash": "2ba0eef3a916dec48e3869f15df3040a",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 79,
"avg_line_length": 30.723557692307693,
"alnum_prop": 0.5516000312964556,
"repo_name": "robertchase/spindrift",
"id": "4db328fdd3807f750d3deff8a3e2e5f3d829c366",
"size": "12781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spindrift/micro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "296019"
},
{
"name": "Shell",
"bytes": "1363"
},
{
"name": "TSQL",
"bytes": "1285"
}
],
"symlink_target": ""
} |
"""\
================================================
likefile - file-like interaction with components
================================================
likefile is a way to run Axon components with code that is not Axon-aware. It
does this by running the scheduler and all associated microprocesses in a
separate thread, and using a custom component to communicate if so desired.
Using this code
---------------
With a normal kamaelia system, you would start up a component and start
running the Axon scheduler as follows, either::
from Axon.Scheduler import scheduler
component.activate()
scheduler.run.runThreads()
someOtherCode()
or simply::
component.run()
someOtherCode()
In both cases, someOtherCode() only runs when the scheduler exits. What do you
do if you want to (e.g.) run this alongside another external library that has
the same requirement?
Well, first we start the Axon scheduler in the background as follows::
from likefile import background
background().start()
The scheduler is now actively running in the background, and you can start
components on it from the foreground, in the same way as you would from inside
kamaelia (don't worry, activate() is threadsafe)::
component.activate()
someOtherCode()
"component" will immediately start running and processing. This is fine if it's
something non-interactive like a TCP server, but what do we do if we want to
interact with this component from someOtherCode?
In this case, we use 'likefile', instead of activating. This is a wrapper
which sits around a component and provides a threadsafe way to interact
with it, whilst it is running in the backgrounded scheduler::
from Axon.LikeFile import likefile
wrappedComponent = likefile(component)
someOtherCode()
Now, wrappedComponent is an instance of the likefile wrapper, and you can
interact with "component" by calling get() on wrappedComponent, to get data
from the outbox on "component", or by calling put(data) to put "data" into
the inbox of "component" like so::
p = likefile( SimpleHTTPClient() )
p.put("http://google.com")
google = p.get()
p.shutdown()
print ("google's homepage is", len(google), "bytes long.")
for both get() and put(), there is an optional extra parameter boxname,
allowing you to interact with different boxes, for example to send a message
with the text "RELOAD" to a component's control inbox, you would do::
wrappedComponent.put("RELOAD", "control")
wrappedComponent.get("signal")
Finally, likefile objects have a shutdown() method that sends the usual Axon
IPC shutdown messages to a wrapped component, and prevents further IO.
Advanced likefile usage
-----------------------
likefile has some optional extra arguments on creation, for handling custom
boxes outside the "basic 4". For example, to wrap a component with inboxes
called "secondary" and "tertiary" and an outbox called "debug", You would do::
p = likefile( componentMaker,
extraInboxes = ("secondary", "tertiary"),
extraOutboxes = "debug", )
Either strings or tuples of strings will work.
It may be the case that the component you are trying to wrap will link its own
inbox/outbox/signal/control, and this will result in a BoxAlreadyLinkedToDestination
exception. To stop likefile from wrapping the default 4 boxes, pass the parameter
wrapDefault = False. Note that you will need to manually wrap every box you want to use,
for example to wrap a component that has its own linkages for signal/control::
p = likefile( myComponent,
wrapDefault = False,
extraInboxes = "inbox",
extraOutboxes = "outbox", )
Diagram of likefile's functionality
-----------------------------------
likefile is constructed from components like so::
+----------------------------------+
| likefile |
+----------------------------------+
| / \
| |
InQueues OutQueues
| |
+---------+-----------------------+---------+
| \ / | |
| +---------+ +--------+ |
| | Input | Shutdown | Output | |
| | Wrapper | ------------> | | |
| | (thread)| Message |Wrapper | |
| +---------+ +--------+ |
| | / \ |
| | | |
| Inboxes Outboxes |
| | | |
| \ / | |
| +----------------------------------+ |
| | the wrapped component | |
| +----------------------------------+ |
| |
| +----------------------------------+ |
| | Some other component | |
| | that was only activated | |
| +----------------------------------+ |
| |
| AXON SCHEDULED COMPONENTS |
+-------------------------------------------+
Note 1: Threadsafeness of activate().
when a component is activated, it calls the method inherited from microprocess, which calls _addThread(self)
on an appropriate scheduler. _addThread calls wakeThread, which places the request on a threadsafe queue.
"""
import sys
from Axon.Scheduler import scheduler
from Axon.Component import component
from Axon.ThreadedComponent import threadedadaptivecommscomponent
from Axon.AdaptiveCommsComponent import AdaptiveCommsComponent
from Axon.AxonExceptions import noSpaceInBox
import threading, time, copy, warnings
try:
import Queue # Python2.6 and earlier
queue = Queue # Allow rest of source to remain unchanged
python_lang_type = 2
except ImportError:
import queue # Python 3 onwards
python_lang_type = 3
import Axon.Ipc as Ipc
queuelengths = 0
import Axon.CoordinatingAssistantTracker as cat
DEFIN = ["inbox", "control"]
DEFOUT = ["outbox", "signal"]
def addBox(names, boxMap, box_adder):
    """\
    Wrap each box named in *names* by calling *box_adder* (either
    self.addInbox or self.addOutbox), recording the real box name it creates
    in *boxMap*, which is used to coordinate message routing within
    component wrappers.

    :param names: iterable of box names to wrap.
    :param boxMap: dict mapping requested box name -> created box name; updated in place.
    :param box_adder: callable taking a box name and returning the created box's name.
    :raises ValueError: if a name in *names* is already present in *boxMap*.
    """
    # The parameter was previously also called "addBox", shadowing this
    # function's own name.
    for boxname in names:
        if boxname in boxMap:
            # The original message interpolated an undefined name
            # ("direction"), so this path raised NameError instead.
            raise ValueError("Box %s already exists!" % boxname)
        realboxname = box_adder(boxname)
        boxMap[boxname] = realboxname
class dummyComponent(component):
    """A do-nothing component whose sole purpose is to stay scheduled so the
    scheduler never runs out of runnable components and exits."""
    def main(self):
        # Sleep forever: pause, yield control back to the scheduler, repeat.
        while 1:
            self.pause()
            yield 1
class background(threading.Thread):
    """A python thread which runs the Axon scheduler in the background.

    Takes the same arguments at creation that scheduler.run.runThreads
    accepts. Only one instance may run at a time: creating a second one
    while the first holds the class lock raises RuntimeError.
    """
    lock = threading.Lock()
    def __init__(self, slowmo=0, zap=False):
        # Non-blocking acquire: if the lock is already held, another
        # background scheduler exists.
        if not background.lock.acquire(False):
            raise RuntimeError("only one scheduler for now can be run!")
        self.slowmo = slowmo
        threading.Thread.__init__(self)
        # setDaemon() is deprecated; assign the attribute instead.
        self.daemon = True  # Die when the caller dies.
        self.zap = zap
    def run(self):
        try:
            if self.zap:
                # Replace the global scheduler and reset the coordinating
                # assistant tracker so we start from a clean slate.
                X = scheduler()
                scheduler.run = X
                cat.coordinatingassistanttracker.basecat.zap()
            dummyComponent().activate() # to keep the scheduler from exiting immediately.
            # TODO - what happens if the foreground calls scheduler.run.runThreads() ? We should stop this from happening.
            scheduler.run.runThreads(slowmo = self.slowmo)
        finally:
            # Release even if the scheduler dies with an exception, so a new
            # background scheduler can be started afterwards (previously the
            # lock leaked on error and blocked all future instances).
            background.lock.release()
class componentWrapperInput(threadedadaptivecommscomponent):
    """A wrapper that takes a child component and waits on an event from the foreground, to signal that there is
    queued data to be placed on the child's inboxes.

    Runs as a threaded component: main() blocks on self.whatInbox, so no
    busy-waiting occurs while the foreground is idle.
    """
    def __init__(self, child, inboxes = DEFIN):
        super(componentWrapperInput, self).__init__()
        self.child = child
        # This is a map from the name of the wrapped inbox on the child, to the
        # Queue used to convey data into it.
        self.inQueues = dict()
        # This queue is used by the foreground to tell us what queue it has sent us
        # data on, so that we do not need to check all our input queues,
        # and also so that we can block on reading it.
        self.whatInbox = queue.Queue()
        # Set once a shutdown message has been forwarded to the child;
        # likefile.shutdown() waits on this event.
        self.isDead = threading.Event()
        # This sets up the linkages between us and our child, avoiding extra
        # box creation by connecting the "basic two" in the same way as, e.g. a pipeline.
        self.childInboxMapping = dict()
        addBox(inboxes, self.childInboxMapping, self.addOutbox)
        if python_lang_type == 2:
            items = self.childInboxMapping.iteritems()
        else:
            items = self.childInboxMapping.items()
        for childSink, parentSource in items:
            # NOTE(review): self.queuelengths is never assigned in this class
            # (componentWrapperOutput sets its own from the module-level
            # queuelengths). Presumably the base class supplies it, or the
            # module-level name was intended -- confirm.
            self.inQueues[childSink] = queue.Queue(self.queuelengths)
            self.link((self, parentSource),(self.child, childSink))
        # This outbox is used to tell the output wrapper when to shut down.
        self.deathbox = self.addOutbox(str(id(self)))
    def main(self):
        """Block until the foreground names an inbox with pending data, then
        forward that data to the child; exit (setting isDead and notifying
        the output wrapper) once a shutdown message has been forwarded."""
        while True:
            whatInbox = self.whatInbox.get()
            if not self.pollQueue(whatInbox):
                # a False return indicates that we should shut down.
                self.isDead.set()
                # tells the foreground object that we've successfully processed a shutdown message.
                # unfortunately, whether the child honours it or not is a matter of debate.
                # Note: this deliberately sends the builtin ``object`` class
                # itself, purely as a sentinel value.
                self.send(object, self.deathbox)
                return
    def pollQueue(self, whatInbox):
        """This method checks all the queues from the outside world, and forwards any waiting data
        to the child component. Returns False if we propagated a shutdown signal, true otherwise."""
        parentSource = self.childInboxMapping[whatInbox]
        # NOTE: this local name shadows the module-level "queue" import;
        # harmless within this method.
        queue = self.inQueues[whatInbox]
        while not queue.empty():
            if not self.outboxes[parentSource].isFull():
                msg = queue.get_nowait() # won't fail, we're the only one reading from the queue.
                try:
                    self.send(msg, parentSource)
                except noSpaceInBox as e:
                    raise RuntimeError("Box delivery failed despite box (earlier) reporting being not full. Is more than one thread directly accessing boxes?")
                if isinstance(msg, (Ipc.shutdownMicroprocess, Ipc.producerFinished)):
                    # print ("Quietly dieing?")
                    return False
            else:
                # if the component's inboxes are full, do something here. Preferably not succeed.
                break
        return True
class componentWrapperOutput(AdaptiveCommsComponent):
    """A component which takes a child component and connects its outboxes to queues, which communicate
    with the likefile component."""
    def __init__(self, child, inputHandler, outboxes = DEFOUT):
        super(componentWrapperOutput, self).__init__()
        self.queuelengths = queuelengths
        self.child = child
        self.addChildren(self.child)
        # This queue maps from the name of the outbox on the child which is to be wrapped,
        # to the Queue which conveys that data to the foreground thread.
        self.outQueues = dict()
        # set up notification from the input handler to kill us when appropriate.
        # we cannot rely on shutdown messages being propagated through the child.
        self.isDead = inputHandler.isDead
        self.deathbox = self.addInbox(str(id(self)))
        self.link((inputHandler, inputHandler.deathbox), (self, self.deathbox))
        # This sets up the linkages between us and our child, avoiding extra
        # box creation by connecting the "basic two" in the same way as, e.g. a pipeline.
        self.childOutboxMapping = dict()
        addBox(outboxes, self.childOutboxMapping, self.addInbox)
        if python_lang_type == 2:
            items = self.childOutboxMapping.iteritems()
        else:
            items = self.childOutboxMapping.items()
        for childSource, parentSink in items:
            self.outQueues[childSource] = queue.Queue(self.queuelengths)
            self.link((self.child, childSource),(self, parentSink))
    def main(self):
        """Activate the child, then loop: pause until woken, flush any child
        output to the foreground queues, and exit once the input wrapper
        signals death via our deathbox."""
        # print ("componentWrapperOutput", self.child)
        self.child.activate()
        while True:
            self.pause()
            yield 1
            self.sendPendingOutput()
            if self.dataReady(self.deathbox):
                return
    def sendPendingOutput(self):
        """This method will take any outgoing data sent to us from a child component and stick it on a queue
        to the outside world."""
        if python_lang_type == 2:
            items = self.childOutboxMapping.iteritems()
        else:
            items = self.childOutboxMapping.items()
        for childSource, parentSink in items:
            # NOTE: this local name shadows the module-level "queue" import;
            # harmless within this method.
            queue = self.outQueues[childSource]
            while self.dataReady(parentSink):
                if not queue.full():
                    msg = self.recv(parentSink)
                    # TODO - what happens when the wrapped component terminates itself? We keep on going. Not optimal.
                    queue.put_nowait(msg)
                else:
                    break
                    # permit a horrible backlog to build up inside our boxes. What could go wrong?
class likefile(object):
    """An interface to the message queues from a wrapped component, which is activated on a backgrounded scheduler."""
    def __init__(self, child, extraInboxes = (), extraOutboxes = (), wrapDefault = True):
        """Wrap *child* for foreground interaction.

        :param child: the (unactivated) component to wrap.
        :param extraInboxes: extra inbox name(s) to wrap, str or tuple of str.
        :param extraOutboxes: extra outbox name(s) to wrap, str or tuple of str.
        :param wrapDefault: if True, also wrap inbox/control and outbox/signal
            where the child has them.
        :raises AttributeError: if no background scheduler is running.
        :raises KeyError: if an explicitly named box does not exist on the child.
        """
        # A free lock means no background() thread holds it, i.e. there is
        # no running scheduler to activate ourselves on.
        if background.lock.acquire(False):
            background.lock.release()
            raise AttributeError("no running scheduler found.")
        # prevent a catastrophe: if we treat a string like "extrainbox" as a tuple, we end up adding one new inbox per
        # letter. TODO - this is inelegant code.
        if not isinstance(extraInboxes, tuple):
            extraInboxes = (extraInboxes, )
        if not isinstance(extraOutboxes, tuple):
            extraOutboxes = (extraOutboxes, )
        # If the component to wrap is missing, say, "inbox", then don't fail but silently neglect to wrap it.
        validInboxes = list(type(child).Inboxes.keys())
        validOutboxes = list(type(child).Outboxes.keys())
        inboxes = []
        outboxes = []
        if wrapDefault:
            for i in DEFIN:
                if i in validInboxes: inboxes.append(i)
            for i in DEFOUT:
                if i in validOutboxes: outboxes.append(i)
        inboxes += list(extraInboxes)
        outboxes += list(extraOutboxes)
        try:
            inputComponent = componentWrapperInput(child, inboxes)
        except KeyError as e:
            raise KeyError ('component to wrap has no such inbox: %s' % e)
        try:
            outputComponent = componentWrapperOutput(child, inputComponent, outboxes)
        except KeyError as e:
            del inputComponent
            raise KeyError('component to wrap has no such outbox: %s' % e)
        self.inQueues = copy.copy(inputComponent.inQueues)
        self.outQueues = copy.copy(outputComponent.outQueues)
        # reaching into the component and its child like this is threadsafe since it has not been activated yet.
        self.inputComponent = inputComponent
        self.outputComponent = outputComponent
        inputComponent.activate()
        outputComponent.activate()
        self.alive = True
    # methods passed through from the queue.
    def empty(self, boxname = "outbox"):
        """Return True if there is no data pending collection on boxname, False otherwise."""
        return self.outQueues[boxname].empty()
    def qsize(self, boxname = "outbox"):
        """Returns the approximate number of pending data items awaiting collection from boxname. Will never be smaller than the actual amount."""
        return self.outQueues[boxname].qsize()
    def get_nowait(self, boxname = "outbox"):
        """Equivalent to get(boxname, False)"""
        return self.get(boxname, blocking = False)
    def anyReady(self):
        """Return a list of outbox names that currently have data waiting, or None if none do."""
        names = []
        for boxname in list(self.outQueues.keys()):
            if self.qsize(boxname):
                names.append(boxname)
        if names != []:
            return names
        return None
    def get(self, boxname = "outbox", blocking = True, timeout = 86400):
        """Performs a blocking read on the queue corresponding to the named outbox on the wrapped component.
        raises AttributeError if the likefile is not alive. Optional parameters blocking and timeout function
        the same way as in Queue objects, since that is what's used under the surface."""
        # print ("self.get boxname ",boxname,"blocking =",blocking,"timeout=",timeout)
        if self.alive:
            # Positional args map onto Queue.get(block, timeout).
            return self.outQueues[boxname].get(blocking, timeout)
        # TODO - remove this.
        # Specifying any timeout allows ctrl-c to interrupt the wait, even if the timeout is excessive.
        # This is one day. this may be a problem, in which case retry after an "empty" exception is raised.
        else:
            raise AttributeError("shutdown was previously called, or we were never activated.")
    def put(self, msg, boxname = "inbox"):
        """Places an object on a queue which will be directed to a named inbox on the wrapped component."""
        # print ("self.put msg", repr(msg), "boxname", boxname)
        if self.alive:
            queue = self.inQueues[boxname]
            queue.put_nowait(msg)
            # Wake the input wrapper and tell it which inbox to service.
            self.inputComponent.whatInbox.put_nowait(boxname)
        else:
            raise AttributeError("shutdown was previously called, or we were never activated.")
    def shutdown(self):
        """Sends terminatory signals to the wrapped component, and shut down the componentWrapper.
        will warn if the shutdown took too long to confirm in action."""
        # TODO - what if the wrapped component has no control box?
        if self.alive:
            self.put(Ipc.shutdown(), "control") # legacy support.
            self.put(Ipc.producerFinished(), "control") # some components only honour this one
            self.put(Ipc.shutdownMicroprocess(), "control") # should be last, this is what we honour
        else:
            raise AttributeError("shutdown was previously called, or we were never activated.")
        # Give the input wrapper up to one second to confirm that it
        # processed a shutdown message (it sets isDead when it does).
        self.inputComponent.isDead.wait(1)
        if not self.inputComponent.isDead.isSet(): # we timed out instead of someone else setting the flag
            warnings.warn("Timed out waiting on shutdown confirmation, may not be dead.")
        self.alive = False
    def __del__(self):
        # Best-effort cleanup; shutdown() is a no-op once alive is False.
        if self.alive:
            self.shutdown()
if __name__ == "__main__":
    # Demo: fetch three pages through a wrapped HTTP client.
    # NOTE(review): the original comment said this "doesn't actually work as
    # of now". It also rebound the class name "background" to None (since
    # Thread.start() returns None) and re-imported the already-imported
    # "time" module *after* calling time.sleep. Both fixed below.
    from Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
    scheduler_thread = background()
    scheduler_thread.start()
    time.sleep(0.1)  # give the scheduler thread a moment to come up
    p = likefile(SimpleHTTPClient())
    p.put("http://google.com")
    p.put("http://slashdot.org")
    p.put("http://whatismyip.org")
    google = p.get()
    slashdot = p.get()
    whatismyip = p.get()
    p.shutdown()
    print ("google is", len(google), "bytes long, and slashdot is", len(slashdot), "bytes long. Also, our IP address is:", whatismyip)
| {
"content_hash": "0555934d255cbfa7555669c3f9879796",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 218,
"avg_line_length": 41.92946058091286,
"alnum_prop": 0.6158832261256804,
"repo_name": "bbc/kamaelia",
"id": "ea8844c1e00940b25bc9b65b0c6f1b198893978f",
"size": "21091",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Code/Python/Axon/Axon/experimental/_pprocess_support.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
} |
"""Contains ligand-specific objects and functions."""
from collections import Counter
from . import gtop
from . import pdb
from .interactions import Interaction, get_interaction_by_id
from .exceptions import NoSuchLigandError
from .shared import DatabaseLink, strip_html
def get_ligand_by_id(ligand_id):
    """Returns a Ligand object of the ligand with the given ID.
    :param int ligand_id: The GtoP ID of the Ligand desired.
    :rtype: :py:class:`Ligand`
    :raises: :class:`.NoSuchLigandError` if no such ligand exists in the database"""
    if not isinstance(ligand_id, int):
        raise TypeError("ligand_id must be int, not '%s'" % str(ligand_id))
    # Guard-clause style: bail out on a missing ligand, otherwise build it.
    response = gtop.get_json_from_gtop("ligands/%i" % ligand_id)
    if not response:
        raise NoSuchLigandError("There is no ligand with ID %i" % ligand_id)
    return Ligand(response)
def get_all_ligands():
    """Returns a list of all ligands in the Guide to PHARMACOLOGY database. This
    can take a few seconds.
    :returns: list of :py:class:`Ligand` objects"""
    return [Ligand(ligand_json)
            for ligand_json in gtop.get_json_from_gtop("ligands")]
def get_ligands_by(criteria):
    """Get all ligands which specify the criteria dictionary.
    :param dict criteria: A dictionary of `field=value` pairs. See the\
    `GtoP ligand web services page <http://www.guidetopharmacology.org/\
    webServices.jsp#ligands>`_ for key/value pairs which can be supplied.
    :returns: list of :py:class:`Ligand` objects."""
    if not isinstance(criteria, dict):
        raise TypeError("criteria must be dict, not '%s'" % str(criteria))
    # Build a query string directly from the key/value pairs.
    query = "&".join("%s=%s" % (field, value)
                     for field, value in criteria.items())
    matches = gtop.get_json_from_gtop("ligands?%s" % query)
    return [Ligand(l) for l in matches] if matches else []
def get_ligand_by_name(name):
    """Returns the ligand which matches the name given.
    :param str name: The name of the ligand to search for. Note that synonyms \
    will not be searched.
    :rtype: :py:class:`Ligand`
    :raises: :class:`.NoSuchLigandError` if no such ligand exists in the database."""
    if not isinstance(name, str):
        raise TypeError("name must be str, not '%s'" % str(name))
    matches = get_ligands_by({"name": name})
    if not matches:
        raise NoSuchLigandError("There is no ligand with name %s" % name)
    return matches[0]
def get_ligands_by_smiles(smiles, search_type="exact", cutoff=0.8):
    """Search for ligands by SMILES string.
    :param str smiles: The SMILES string to search with.
    :param str search_type: The type of search. Viable options are ``"exact"``, \
    ``"substructure"`` or ``"similarity"``.
    :param float cutoff: If performing a similarity search, this is the cutoff \
    used for similarity. The default is 0.8 and the maximum is 1.
    :returns: list of :py:class:`Ligand` objects.
    :raises TypeError: if smiles/search_type are not str or cutoff not numeric.
    :raises ValueError: if search_type is unrecognised or cutoff out of range."""
    if not isinstance(smiles, str):
        raise TypeError("smiles must be str, not '%s'" % str(smiles))
    if not isinstance(search_type, str):
        raise TypeError("search_type must be str, not '%s'" % str(search_type))
    if search_type not in ["exact", "substructure", "similarity"]:
        # Fixed typo in the error message ("valud" -> "valid").
        raise ValueError("'%s' is not a valid search type" % search_type)
    if not isinstance(cutoff, int) and not isinstance(cutoff, float):
        raise TypeError("cutoff must be numeric, not '%s'" % str(cutoff))
    if not 0 <= cutoff <= 1:
        raise ValueError("cutoff must be between 0 and 1, not %s" % (str(cutoff)))
    # similarityGt is only meaningful for similarity searches; the service
    # expects it as an integer percentage.
    query = "ligands/%s?smiles=%s%s" % (
     search_type,
     smiles,
     ("&similarityGt=%i" % (cutoff * 100)) if search_type == "similarity" else ""
    )
    json_data = gtop.get_json_from_gtop(query)
    if json_data:
        return [Ligand(l) for l in json_data]
    else:
        return []
class Ligand:
"""A Guide to PHARMACOLOGY ligand object.
:param json_data: A dictionary obtained from the web services."""
def __init__(self, json_data):
    """Cache the ligand's fields from the web-service JSON dictionary.

    All values are stored on private attributes and exposed via the
    accessor methods below; no further web requests are made here.
    """
    self.json_data = json_data
    self._ligand_id = json_data["ligandId"]
    self._name = json_data["name"]
    # Falsy abbreviation values (e.g. empty string) are normalised to None.
    self._abbreviation = json_data["abbreviation"] if json_data["abbreviation"] else None
    self._inn = json_data["inn"]
    self._ligand_type = json_data["type"]
    self._species = json_data["species"]
    self._radioactive = json_data["radioactive"]
    self._labelled = json_data["labelled"]
    self._approved = json_data["approved"]
    self._withdrawn = json_data["withdrawn"]
    self._approval_source = json_data["approvalSource"]
    self._subunit_ids = json_data["subunitIds"]
    self._complex_ids = json_data["complexIds"]
    self._prodrug_ids = json_data["prodrugIds"]
    self._active_drug_ids = json_data["activeDrugIds"]
def __repr__(self):
return "<Ligand %i (%s)>" % (self._ligand_id, self._name)
def ligand_id(self):
"""Returns the ligand's GtoP ID.
:rtype: int"""
return self._ligand_id
@strip_html
def name(self):
"""Returns the ligand's name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._name
@strip_html
def abbreviation(self):
"""Returns the ligand's abbreviated name.
:param bool strip_html: If ``True``, the abbreviation will have HTML entities stripped.
:rtype: str"""
return self._abbreviation
@strip_html
def inn(self):
"""Returns the ligand's INN name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._inn
def ligand_type(self):
"""Returns the ligand's type.
:rtype: str"""
return self._ligand_type
def species(self):
"""Returns the ligand's species, where appropriate.
:rtype: str"""
return self._species
def radioactive(self):
"""Returns True if the ligand is radioactive.
:rtype: bool"""
return self._radioactive
def labelled(self):
"""Returns True if the ligand is labelled.
:rtype: bool"""
return self._labelled
def approved(self):
"""Returns True if the ligand is approved.
:rtype: bool"""
return self._approved
def withdrawn(self):
"""Returns True if the ligand has been withdrawn.
:rtype: bool"""
return self._withdrawn
@strip_html
def approval_source(self):
"""Returns the regulatory body that approved the ligand, where appropriate.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._approval_source
def subunit_ids(self):
"""Returns the the ligand IDs of all ligands which are subunits of this
target.
:returns: list of ``int``"""
return self._subunit_ids
def subunits(self):
"""Returns a list of all ligands which are subunits of this ligand.
:returns: list of :py:class:`Ligand` objects"""
return [get_ligand_by_id(id_) for id_ in self._subunit_ids]
def complex_ids(self):
"""Returns the the ligand IDs of all ligands of which this target is a
subunit.
:returns: list of ``int``"""
return self._complex_ids
def complexes(self):
"""Returns a list of all ligands of which this ligand is a subunit.
:returns: list of :py:class:`Ligand` objects"""
return [get_ligand_by_id(id_) for id_ in self._complex_ids]
def prodrug_ids(self):
"""Returns the the ligand IDs of all ligands which are prodrugs of this
ligand.
:returns: list of ``int``"""
return self._prodrug_ids
def prodrugs(self):
"""Returns a list of all ligands which are prodrugs of this ligand.
:returns: list of :py:class:`Ligand` objects"""
return [get_ligand_by_id(id_) for id_ in self._prodrug_ids]
def active_drug_ids(self):
"""Returns the the ligand IDs of all ligands which are active
equivalents of this ligand.
:returns: list of ``int``"""
return self._active_drug_ids
def active_drugs(self):
"""Returns a list of all ligands which are active equivalents of this ligand.
:returns: list of :py:class:`Ligand` objects"""
return [get_ligand_by_id(id_) for id_ in self._active_drug_ids]
def iupac_name(self):
"""Returns the ligand's IUPAC name.
:rtype: str"""
return self._get_structure_json().get("iupacName")
def smiles(self):
"""Returns the ligand's SMILES string.
:rtype: str"""
return self._get_structure_json().get("smiles")
def inchi(self):
"""Returns the ligand's InChI string.
:rtype: str"""
return self._get_structure_json().get("inchi")
def inchi_key(self):
"""Returns the ligand's InChI key.
:rtype: str"""
return self._get_structure_json().get("inchiKey")
def one_letter_sequence(self):
"""Returns the ligand's single letter amino acid sequence where appropriate.
:rtype: str"""
return self._get_structure_json().get("oneLetterSeq")
def three_letter_sequence(self):
"""Returns the ligand's three letter amino acid sequence where appropriate.
:rtype: str"""
return self._get_structure_json().get("threeLetterSeq")
def post_translational_modifications(self):
"""Returns any post-translational modifications.
:rtype: str"""
return self._get_structure_json().get("postTranslationalModifications")
def chemical_modifications(self):
"""Returns any chemical modifications.
:rtype: str"""
return self._get_structure_json().get("chemicalModifications")
def hydrogen_bond_acceptors(self):
"""Returns the number of hydrogen bond accepting atoms.
:rtype: int"""
return self._get_molecular_json().get("hydrogenBondAcceptors")
def hydrogen_bond_donors(self):
"""Returns the number of hydrogen bond donor atoms.
:rtype: int"""
return self._get_molecular_json().get("hydrogenBondDonors")
def rotatable_bonds(self):
"""Returns the number of rotatable bonds in the ligand.
:rtype: int"""
return self._get_molecular_json().get("rotatableBonds")
def topological_polar_surface_area(self):
"""Returns the polar surface area of the ligand in Angstroms.
:rtype: float"""
return self._get_molecular_json().get("topologicalPolarSurfaceArea")
def molecular_weight(self):
"""Returns the ligand's mass in Daltons.
:rtype: float"""
return self._get_molecular_json().get("molecularWeight")
def log_p(self):
"""Returns the logP value of the ligand.
:rtype: int"""
return self._get_molecular_json().get("logP")
def lipinski_rules_broken(self):
"""Returns the number of Lipinski's Rules the ligand breaks.
:rtype: int"""
return self._get_molecular_json().get("lipinskisRuleOfFive")
@strip_html
def synonyms(self):
    """Returns the ligand's synonyms.
    :param bool strip_html: If ``True``, the synonyms will have HTML entities stripped.
    :returns: list of ``str``"""
    return [synonym["name"] for synonym in self._get_synonym_json()]
def general_comments(self):
"""Returns general comments pertaining to the ligand.
:rtype: str"""
return self._get_comments_json().get("comments")
def bioactivity_comments(self):
    """Returns comments pertaining to bioactivity.
    NOTE(review): this (and the sibling *_comments methods below) reads from
    _get_molecular_json(), whereas general_comments() uses
    _get_comments_json() -- confirm the molecular payload really carries
    these comment fields.
    :rtype: str"""
    return self._get_molecular_json().get("bioactivityComments")
def clinical_use_comments(self):
"""Returns comments pertaining to clinical use.
:rtype: str"""
return self._get_molecular_json().get("clinicalUse")
def mechanism_of_action_comments(self):
"""Returns comments pertaining to mechanism.
:rtype: str"""
return self._get_molecular_json().get("mechanismOfAction")
def absorption_and_distribution_comments(self):
"""Returns comments pertaining to absorption and distribution.
:rtype: str"""
return self._get_molecular_json().get("absorptionAndDistribution")
def metabolism_comments(self):
"""Returns comments pertaining to metabolism.
:rtype: str"""
return self._get_molecular_json().get("metabolism")
def elimination_comments(self):
"""Returns comments pertaining to elimination from the body.
:rtype: str"""
return self._get_molecular_json().get("elimination")
def population_pharmacokinetics_comments(self):
"""Returns comments pertaining to population pharmacokinetics.
:rtype: str"""
return self._get_molecular_json().get("populationPharmacokinetics")
def organ_function_impairments_comments(self):
"""Returns comments pertaining to organ function impairment.
:rtype: str"""
return self._get_molecular_json().get("organFunctionImpairment")
def mutations_and_pathophysiology_comments(self):
"""Returns comments pertaining to mutations and pathophysiology.
:rtype: str"""
return self._get_molecular_json().get("mutationsAndPathophysiology")
def database_links(self):
    """Returns a list of database links for this ligand.
    :rtype: list of :py:class:`.DatabaseLink`"""
    return list(map(DatabaseLink, self._get_database_json()))
def interactions(self):
    """Returns a list of interactions for this ligand.
    :rtype: list of :py:class:`.Interaction`"""
    return [Interaction(interaction_json) for interaction_json in self._get_interactions_json()]
# Borrow the module-level function as a method, so an interaction can be
# fetched directly from a ligand object.
get_interaction_by_id = get_interaction_by_id
"""Returns an Interaction object of a given ID belonging to the ligand.
:param int interaction_id: The interactions's ID.
:rtype: :py:class:`.Interaction`
:raises: :class:`.NoSuchInteractionError`: if no such interaction exists in the database."""
def targets(self):
    """Returns a list of all targets which this ligand interacts with.
    :returns: list of :py:class:`.Target` objects"""
    unique_targets = []
    for interaction in self.interactions():
        # Preserve first-seen order while de-duplicating.
        candidate = interaction.target()
        if candidate not in unique_targets:
            unique_targets.append(candidate)
    return unique_targets
@pdb.ask_about_molecupy
def gtop_pdbs(self):
    """Returns a list of PDBs which the Guide to PHARMACOLOGY says contain
    this ligand.
    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    pdbs = []
    for interaction in self.interactions():
        # Loop variable renamed from "pdb", which shadowed the imported pdb
        # module (harmless here, but confusing to readers).
        for code in interaction.gtop_pdbs():
            if code not in pdbs:
                pdbs.append(code)
    return pdbs
@pdb.ask_about_molecupy
def smiles_pdbs(self, search_type="exact"):
    """Queries the RSCB PDB database with the ligand's SMILES string.
    :param str search_type: The type of search to run - whether exact matches\
    only should be returned.
    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    # Guard clauses: no SMILES, or no result, means no PDBs.
    if not self.smiles():
        return []
    xml = pdb.query_rcsb("smilesQuery", {
     "smiles": self.smiles(),
     "search_type": search_type
    })
    if not xml:
        return []
    return [element.attrib["structureId"] for element in list(xml[0])]
@pdb.ask_about_molecupy
def inchi_pdbs(self):
    """Queries the RSCB PDB database with the ligand's InChI string.
    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    # Nothing to query with if the ligand has no InChI string.
    if not self.inchi():
        return []
    results = pdb.query_rcsb_advanced("ChemCompDescriptorQuery", {
     "descriptor": self.inchi(),
     "descriptorType": "InChI"
    })
    return results or []
@pdb.ask_about_molecupy
def name_pdbs(self, comparator="equals"):
    """Queries the RSCB PDB database with the ligand's name.
    :param str comparator: The type of search to run - whether exact matches\
    only should be returned, or substrings etc.
    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    query_params = {
     "comparator": comparator.title(),
     "name": self.name(),
     "polymericType": "Any"
    }
    results = pdb.query_rcsb_advanced("ChemCompNameQuery", query_params)
    return results or []
@pdb.ask_about_molecupy
def sequence_pdbs(self):
    """Queries the RCSB PDB database with the ligand's amino acid sequence,\
    if that ligand is a peptide.

    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    sequence = self.one_letter_sequence()
    if not sequence:
        return []
    results = pdb.query_rcsb_advanced("SequenceQuery", {
        "sequence": sequence,
        "eCutOff": "0.01",
        "searchTool": "blast",
        "sequenceIdentityCutoff": "100"
    })
    return results or []
@pdb.ask_about_molecupy
def het_pdbs(self):
    """Queries the RCSB PDB database with the ligand's PDB HET code,
    taken from the first PDB entry among its database links.

    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    # (Previous docstring was a copy-paste of sequence_pdbs; this method
    # does not use the amino acid sequence at all.)
    pdb_links = [link for link in self.database_links()
                 if "PDB" in link.database()]
    if not pdb_links:
        return []
    results = pdb.query_rcsb_advanced("ChemCompIdQuery", {
        "chemCompId": pdb_links[0].accession(),
    })
    return results or []
@pdb.ask_about_molecupy
def all_external_pdbs(self):
    """Queries the RCSB PDB database by all parameters.

    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    # Union of every external search; order is unspecified (set semantics).
    codes = set()
    for query in (self.smiles_pdbs, self.inchi_pdbs, self.name_pdbs,
                  self.sequence_pdbs, self.het_pdbs):
        codes.update(query())
    return list(codes)
@pdb.ask_about_molecupy
def all_pdbs(self):
    """Get a list of PDB codes using all means available - annotated and
    external.

    :param bool as_molecupy: Returns the PDBs as \
    `molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
    :returns: list of ``str`` PDB codes"""
    return list(set(self.gtop_pdbs()) | set(self.all_external_pdbs()))
def find_in_pdb_by_smiles(self, molecupy_pdb):
    """Searches for the ligand in a `molecuPy <http://molecupy.readthedocs.io>`_
    PDB object by SMILES string and returns the small molecule it finds.

    :param molecupy_pdb: The molecuPy PDB object.
    :rtype: ``SmallMolecule``"""
    if not self.smiles():
        return None
    # Element counts derived from the SMILES letters, hydrogen excluded.
    formula = Counter(char.upper() for char in self.smiles()
                      if char.isalpha() and char.upper() != "H")
    ordered = sorted(molecupy_pdb.model().small_molecules(),
                     key=lambda m: m.molecule_id())
    matches = [m for m in ordered if m.formula() == formula]
    if matches:
        # Prefer the match with the most annotated binding-site residues.
        return max(matches, key=lambda m: len(m.bind_site().residues())
                   if m.bind_site() else 0)
    return None
def find_in_pdb_by_name(self, molecupy_pdb):
    """Searches for the ligand in a `molecuPy <http://molecupy.readthedocs.io>`_
    PDB object by ligand name and returns the small molecule it finds.

    :param molecupy_pdb: The molecuPy PDB object.
    :rtype: ``SmallMolecule``"""
    if not self.name():
        return None
    target = self.name().lower()
    matches = []
    for molecule in sorted(molecupy_pdb.model().small_molecules(),
                           key=lambda m: m.molecule_id()):
        het_name = molecupy_pdb.data_file().het_names().get(
            molecule.molecule_name())
        if het_name and het_name.lower() == target:
            matches.append(molecule)
    if matches:
        # Prefer the match with the most annotated binding-site residues.
        return max(matches, key=lambda m: len(m.bind_site().residues())
                   if m.bind_site() else 0)
    return None
def find_in_pdb_by_mass(self, molecupy_pdb):
    """Searches for the ligand in a `molecuPy <http://molecupy.readthedocs.io>`_
    PDB object by ligand mass and returns the small molecule it finds.

    :param molecupy_pdb: The molecuPy PDB object.
    :rtype: ``SmallMolecule``"""
    weight = self.molecular_weight()
    if not weight:
        return None
    molecules = list(molecupy_pdb.model().small_molecules())
    if not molecules:
        return None
    closest = min(molecules, key=lambda m: abs(m.mass() - weight))
    # Accept only a match within 40 mass units of the annotated weight.
    if -40 < closest.mass() - weight < 40:
        return closest
    return None
def find_in_pdb_by_peptide_string(self, molecupy_pdb):
    """Searches for the ligand in a `molecuPy <http://molecupy.readthedocs.io>`_
    PDB object by peptide sequence and returns the chain it finds.

    :param molecupy_pdb: The molecuPy PDB object.
    :rtype: ``Chain``"""
    sequence = self.one_letter_sequence()
    if not sequence:
        return None
    for chain in molecupy_pdb.model().chains():
        chain_sequence = chain.sequence_string()
        # The chain must contain the sequence and be at most ~10% longer.
        if sequence in chain_sequence and 0.9 <= (
            len(sequence) / len(chain_sequence)
        ) <= 1:
            return chain
    return None
def _get_structure_json(self):
    # GtoP 'structure' endpoint; {} when the service returns nothing.
    return gtop.get_json_from_gtop(
        "ligands/%i/structure" % self._ligand_id
    ) or {}

def _get_molecular_json(self):
    # GtoP 'molecularProperties' endpoint; {} on empty response.
    return gtop.get_json_from_gtop(
        "ligands/%i/molecularProperties" % self._ligand_id
    ) or {}

def _get_synonym_json(self):
    # GtoP 'synonyms' endpoint; [] on empty response.
    return gtop.get_json_from_gtop(
        "ligands/%i/synonyms" % self._ligand_id
    ) or []

def _get_comments_json(self):
    # GtoP 'comments' endpoint; {} on empty response.
    return gtop.get_json_from_gtop(
        "ligands/%i/comments" % self._ligand_id
    ) or {}

def _get_database_json(self):
    # GtoP 'databaseLinks' endpoint; [] on empty response.
    return gtop.get_json_from_gtop(
        "ligands/%i/databaseLinks" % self._ligand_id
    ) or []

def _get_interactions_json(self):
    # GtoP 'interactions' endpoint; [] on empty response.
    return gtop.get_json_from_gtop(
        "ligands/%i/interactions" % self._ligand_id
    ) or []
| {
"content_hash": "bc5763015bc506f798256910f88dcc49",
"timestamp": "",
"source": "github",
"line_count": 802,
"max_line_length": 106,
"avg_line_length": 29.602244389027433,
"alnum_prop": 0.6074301840697528,
"repo_name": "samirelanduk/pygtop",
"id": "e2d3f398bbfa12638f978cfeb0282527b0da0fd1",
"size": "23741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygtop/ligands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147850"
}
],
"symlink_target": ""
} |
from core.notify.plugins import base
import logging
class Plugin(base.Plugin):
    """Notification plugin that cleans up derived data (e.g. views)
    after an element has been deleted."""

    subscribe = ['DELETE']
    name = 'Cleanup Plugin'
    description = 'Cleans the information after deleting an element'
    author = 'Slavey Karadzhov <slaff@linux-bg.org>'
    version = '0.1'

    def process(self, user, request, resource, path, meta):
        """Handle a DELETE notification.

        :param int user: id of the user who triggered the event
        :param Request request: the originating request
        :param Resource resource: the deleted resource
        :param string path: the local resource path
        :param array meta: extra event metadata
        """
        # (The old floating comment also documented 'event' and 'uri'
        # parameters that this method does not accept.)
        # TODO: delete all the views derived from the deleted resource.
        pass
| {
"content_hash": "42b2648297fc76026af3e185aba6ef54",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 29,
"alnum_prop": 0.6656671664167916,
"repo_name": "slaff/attachix",
"id": "72cbf0c29c434c5472c3f807a81288edf5ca902b",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/core/notify/plugins/available/cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45563"
},
{
"name": "JavaScript",
"bytes": "6214571"
},
{
"name": "Python",
"bytes": "299910"
},
{
"name": "Shell",
"bytes": "3519"
},
{
"name": "XSLT",
"bytes": "10984"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import theano
import theano.tensor as T
import types
def softmax(x):
    """Thin wrapper: apply ``T.nnet.softmax`` to *x*."""
    return T.nnet.softmax(x)
def time_distributed_softmax(x):
    """Softmax over a 3-D tensor: flatten the first two axes, apply
    ``T.nnet.softmax``, then restore the original shape."""
    original_shape = x.shape
    flattened = x.reshape((original_shape[0] * original_shape[1],
                           original_shape[2]))
    return T.nnet.softmax(flattened).reshape(original_shape)
def softplus(x):
    """Thin wrapper: apply ``T.nnet.softplus`` to *x*."""
    return T.nnet.softplus(x)
def relu(x):
    """Rectified linear unit, max(x, 0), written with elementwise
    arithmetic so it also works on symbolic tensors."""
    return 0.5 * (x + abs(x))
def tanh(x):
    """Thin wrapper: apply ``T.tanh`` to *x*."""
    return T.tanh(x)

def sigmoid(x):
    """Thin wrapper: apply ``T.nnet.sigmoid`` to *x*."""
    return T.nnet.sigmoid(x)

def hard_sigmoid(x):
    """Thin wrapper: apply ``T.nnet.hard_sigmoid`` to *x*."""
    return T.nnet.hard_sigmoid(x)

def linear(x):
    """Identity activation: return *x* unchanged."""
    return x
from .utils.generic_utils import get_from_module
def get(identifier):
    """Look up an activation function in this module by *identifier*
    (resolution is delegated to ``utils.generic_utils.get_from_module``)."""
    return get_from_module(identifier, globals(), 'activation function')
"content_hash": "69a447def59afeeaec2c519937452040",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 20.38235294117647,
"alnum_prop": 0.6810966810966811,
"repo_name": "aleju/keras",
"id": "a7eb78438238a704c97af0b171f66fe9ca718ed3",
"size": "693",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "keras/activations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195188"
}
],
"symlink_target": ""
} |
from app import app

# The app will spawn twice because of the reloader; use_reloader=False stops
# that behaviour.
# ref. http://stackoverflow.com/questions/25504149/why-does-running-the-flask-dev-server-run-itself-twice
# app.run(port=5000, debug=True, host='0.0.0.0', use_reloader=False)
# start server and allow binding to local host
# app.run(host='127.0.0.1', port=5000, debug=True, use_evalex=False, use_reloader=False)
# start server and allow binding to any address
# NOTE(review): debug=True while bound to 0.0.0.0 is development-only;
# use_evalex=False at least disables the interactive debugger console.
app.run(host='0.0.0.0', port=5000, debug=True, use_evalex=False, use_reloader=False)
| {
"content_hash": "86d3c95be40578b2501bc5e5bbb181de",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 105,
"avg_line_length": 45.916666666666664,
"alnum_prop": 0.7477313974591652,
"repo_name": "Sotera/newman",
"id": "35e29c2c57a97b9137a80f6dddfc5047c7196258",
"size": "570",
"binary": false,
"copies": "1",
"ref": "refs/heads/update_ex_5.x",
"path": "server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "465910"
},
{
"name": "CoffeeScript",
"bytes": "3296"
},
{
"name": "HTML",
"bytes": "363448"
},
{
"name": "JavaScript",
"bytes": "5068065"
},
{
"name": "Python",
"bytes": "167045"
},
{
"name": "Shell",
"bytes": "4407"
}
],
"symlink_target": ""
} |
import re
try:
# Python >=2.6
from functools import reduce
except ImportError:
# Python <2.6
pass
import sys
import operator
_re_trail = re.compile('\((?P<txn_body>[a-z_]*), (?P<filename>[a-z_\-./]*), (?P<lineno>[0-9]*), (?P<txn>0|1)\): (?P<ops>.*)')
_re_table_op = re.compile('\(([a-z]*), ([a-z]*)\)')
_seperator = '------------------------------------------------------------\n'
def parse_trails_log(infile):
    """Parse a trail-debug log into a list of (txn_body, trail) tuples,
    keeping only trails that ran inside a transaction. Exits the process
    on a malformed line."""
    trails = []
    for lineno, line in enumerate(infile.readlines(), start=1):
        match = _re_trail.match(line)
        if not match:
            sys.stderr.write('Invalid input, line %u:\n%s\n' % (lineno, line))
            sys.exit(1)
        ### We're not interested in trails that don't use txns at this point.
        if not int(match.group('txn')):
            continue
        txn_body = (match.group('txn_body'), match.group('filename'),
                    int(match.group('lineno')))
        ops = _re_table_op.findall(match.group('ops'))
        ops.reverse()
        if not ops:
            sys.stderr.write('Warning! Empty trail at line %u:\n%s'
                             % (lineno, line))
        trails.append((txn_body, ops))
    return trails
def output_summary(trails, outfile):
    """Write aggregate statistics for *trails* to *outfile*.

    *trails* is a non-empty list of (txn_body, trail) tuples; the stats
    are computed over the number of ops per trail."""
    ops = sorted(len(trail) for (_, trail) in trails)
    total_trails = len(ops)
    # sum() replaces reduce(operator.add, ...) - identical for int lists.
    total_ops = sum(ops)
    max_ops = ops[-1]
    # Integer division: the original '/' produces a float index and raises
    # TypeError under Python 3.
    median_ops = ops[total_trails // 2]
    average_ops = float(total_ops) / total_trails
    outfile.write(_seperator)
    outfile.write('Summary\n')
    outfile.write(_seperator)
    outfile.write('Total number of trails: %10i\n' % total_trails)
    outfile.write('Total number of ops: %10i\n' % total_ops)
    outfile.write('max ops/trail: %10i\n' % max_ops)
    outfile.write('median ops/trail: %10i\n' % median_ops)
    outfile.write('average ops/trail: %10.2f\n' % average_ops)
    outfile.write('\n')
# custom compare function
def _freqtable_cmp(a_b, c_d):
(a, b) = a_b
(c, d) = c_d
c = cmp(d, b)
if not c:
c = cmp(a, c)
return c
def list_frequencies(items):
    """
    Given a list, return a list composed of (item, frequency) pairs,
    sorted by descending frequency with ties broken by ascending item.

    The parameter was previously named 'list', shadowing the builtin that
    the body then tried to call ('list(counter.items())'), so every call
    raised TypeError. The sort now uses a key function, which also works
    under Python 3 (list.sort no longer accepts a comparator).
    """
    counter = {}
    for item in items:
        counter[item] = counter.get(item, 0) + 1
    return sorted(counter.items(), key=lambda pair: (-pair[1], pair[0]))
def output_trail_length_frequencies(trails, outfile):
    """Write a frequency table of trail lengths (ops per trail) to
    *outfile*."""
    lengths = [len(trail) for (_, trail) in trails]
    total_trails = len(lengths)
    frequencies = list_frequencies(lengths)
    outfile.write(_seperator)
    outfile.write('Trail length frequencies\n')
    outfile.write(_seperator)
    outfile.write('ops/trail frequency percentage\n')
    for (length, count) in frequencies:
        percentage = float(count) * 100 / total_trails
        outfile.write('%4i %6i %5.2f\n' % (length, count, percentage))
    outfile.write('\n')
def output_trail(outfile, trail, column=0):
    """Pretty-print *trail* to *outfile*, wrapping lines at roughly 75
    characters and indenting continuation lines to *column*."""
    ### Output the trail itself, in its own column
    if not trail:
        outfile.write('<empty>\n')
        return
    indent = ' ' * column
    line = str(trail[0])
    for op in trail[1:]:
        op_str = str(op)
        if len(line) + len(op_str) > 75 - column:
            # Flush the full line and start a fresh, indented one.
            outfile.write('%s,\n' % line)
            outfile.write(indent)
            line = op_str
        else:
            line = line + ', ' + op_str
    outfile.write('%s\n' % line)
    outfile.write('\n')
def output_trail_frequencies(trails, outfile):
    """Write a frequency table of distinct trails (grouped by txn_body)
    to *outfile*."""
    total_trails = len(trails)
    # Trails must be hashable to be counted - convert lists to tuples.
    ttrails = [(txn_body, tuple(trail)) for (txn_body, trail) in trails]
    frequencies = list_frequencies(ttrails)
    outfile.write(_seperator)
    outfile.write('Trail frequencies\n')
    outfile.write(_seperator)
    outfile.write('frequency percentage ops/trail trail\n')
    for (((txn_body, filename, lineno), trail), count) in frequencies:
        percentage = float(count) * 100 / total_trails
        outfile.write('-- %s - %s:%u --\n' % (txn_body, filename, lineno))
        outfile.write('%6i %5.2f %4i ' % (count, percentage, len(trail)))
        output_trail(outfile, trail, 37)
def output_txn_body_frequencies(trails, outfile):
    """Write a frequency table of txn_body functions to *outfile*."""
    total_trails = len(trails)
    bodies = [txn_body for (txn_body, _) in trails]
    frequencies = list_frequencies(bodies)
    outfile.write(_seperator)
    outfile.write('txn_body frequencies\n')
    outfile.write(_seperator)
    outfile.write('frequency percentage txn_body\n')
    for ((txn_body, filename, lineno), count) in frequencies:
        percentage = float(count) * 100 / total_trails
        outfile.write('%6i %5.2f %s - %s:%u\n'
                      % (count, percentage, txn_body, filename, lineno))
def usage(pgm):
    """Print a short usage/help message for *pgm* to stderr.

    Fixes the duplicated word in the original help text
    ("which will cause it / it to print")."""
    w = sys.stderr.write
    w("%s: a program for analyzing Subversion trail usage statistics.\n" % pgm)
    w("\n")
    w("Usage:\n")
    w("\n")
    w(" Compile Subversion with -DSVN_FS__TRAIL_DEBUG, which will cause\n")
    w(" it to print trail statistics to stderr. Save the stats to a file,\n")
    w(" invoke %s on the file, and ponder the output.\n" % pgm)
    w("\n")
if __name__ == '__main__':
    # Accept at most one argument: the trail log file (stdin by default).
    if len(sys.argv) > 2:
        sys.stderr.write("Error: too many arguments\n\n")
        usage(sys.argv[0])
        sys.exit(1)
    if len(sys.argv) == 1:
        infile = sys.stdin
    else:
        try:
            infile = open(sys.argv[1])
        except (IOError):
            sys.stderr.write("Error: unable to open '%s'\n\n" % sys.argv[1])
            usage(sys.argv[0])
            sys.exit(1)
    # Parse the whole log once, then emit each report section to stdout.
    trails = parse_trails_log(infile)
    output_summary(trails, sys.stdout)
    output_trail_length_frequencies(trails, sys.stdout)
    output_trail_frequencies(trails, sys.stdout)
    output_txn_body_frequencies(trails, sys.stdout)
| {
"content_hash": "698ad97032210369b1c9bbaa1c4f2cf0",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 125,
"avg_line_length": 26.536585365853657,
"alnum_prop": 0.6066176470588235,
"repo_name": "jmckaskill/subversion",
"id": "9717c6cee131ed3005bae728f1df87234ccb0ac9",
"size": "6316",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/dev/trails.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16930676"
},
{
"name": "C#",
"bytes": "6994"
},
{
"name": "C++",
"bytes": "509265"
},
{
"name": "Emacs Lisp",
"bytes": "467395"
},
{
"name": "Java",
"bytes": "1463304"
},
{
"name": "Objective-C",
"bytes": "255507"
},
{
"name": "Perl",
"bytes": "227763"
},
{
"name": "Python",
"bytes": "5030884"
},
{
"name": "Ruby",
"bytes": "436873"
},
{
"name": "Shell",
"bytes": "260031"
},
{
"name": "VimL",
"bytes": "4070"
}
],
"symlink_target": ""
} |
"""
Extended Api implementation with application-specific helpers
----------------------------------------------------------------
"""
from six import iteritems
from flask_restplus_patched import Api as BaseApi
from .namespace import Namespace
class Api(BaseApi):
    """
    API subclass holding application-specific helpers: a custom Namespace
    factory and OAuth scope management.
    """

    def namespace(self, *args, **kwargs):
        # The only purpose of this method is to pass custom Namespace class
        _namespace = Namespace(*args, **kwargs)
        self.namespaces.append(_namespace)
        return _namespace

    def add_oauth_scope(self, scope_name, scope_description):
        """Register *scope_name* on every OAuth-type authorization scheme.

        Raises AssertionError if the scope is already registered (note:
        assert statements are stripped under ``python -O``).
        """
        for authorization_settings in self.authorizations.values():
            if authorization_settings['type'].startswith('oauth'):
                assert scope_name not in authorization_settings['scopes'], \
                    "OAuth scope %s already exists" % scope_name
                authorization_settings['scopes'][scope_name] = scope_description

    def add_namespace(self, ns):
        # Rewrite security rules for OAuth scopes since Namespaces don't have
        # enough information about authorization methods.
        for resource, _, _ in ns.resources:
            for method in resource.methods:
                method_func = getattr(resource, method.lower())
                if (
                    hasattr(method_func, '__apidoc__')
                    and
                    'security' in method_func.__apidoc__
                    and
                    '__oauth__' in method_func.__apidoc__['security']
                ):
                    # Replace the '__oauth__' placeholder with every
                    # configured OAuth scheme, carrying its scopes over.
                    oauth_scopes = method_func.__apidoc__['security']['__oauth__']['scopes']
                    method_func.__apidoc__['security'] = {
                        auth_name: oauth_scopes
                        for auth_name, auth_settings in iteritems(self.authorizations)
                        if auth_settings['type'].startswith('oauth')
                    }
        super(Api, self).add_namespace(ns)
| {
"content_hash": "dce775706a6ecff19e452dc8ffc3383c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 39.568627450980394,
"alnum_prop": 0.5490584737363726,
"repo_name": "millen1m/flask-restplus-server-example",
"id": "0eae20ce25e5b15a7e2832c41ae6b347143289e5",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/extensions/api/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10615"
},
{
"name": "JavaScript",
"bytes": "5468"
},
{
"name": "Mako",
"bytes": "1637"
},
{
"name": "Python",
"bytes": "224314"
}
],
"symlink_target": ""
} |
class SeleneException(Exception):
    """Base class for all Selene template-engine errors."""
    pass


class TemplateLoaderException(SeleneException):
    """Error raised by a template loader."""
    pass


class TopLevelLoaderException(TemplateLoaderException):
    """Error raised by the top-level template loader."""
    pass


class UINotFoundException(SeleneException):
    """Error raised when a requested UI is not found."""
    pass


class UINestedCallException(SeleneException):
    """Error raised on a nested UI call."""
    pass
| {
"content_hash": "2f519f5e60a057f099eb932439c1f104",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 15.61111111111111,
"alnum_prop": 0.7935943060498221,
"repo_name": "whiteclover/Medoly",
"id": "d409c0062bebb460d092b1af3f2b67aa24004d51",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medoly/template/impl/selene/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "911"
},
{
"name": "Python",
"bytes": "165054"
}
],
"symlink_target": ""
} |
import unittest
from katas.kyu_7.truncate_a_string import truncate_string
class TruncateStringTestCase(unittest.TestCase):
    """Unit tests for truncate_string: truncation appends '...', and
    strings at or under the limit are returned unchanged."""

    def test_equal_1(self):
        self.assertEqual(truncate_string('pippi', 3), 'pip...')

    def test_equal_2(self):
        self.assertEqual(truncate_string(
            'Peter Piper picked a peck of pickled peppers', 14
        ), 'Peter Piper...')

    def test_equal_3(self):
        self.assertEqual(truncate_string(
            'A-tisket a-tasket A green and yellow basket', 11
        ), 'A-tisket...')

    # Strings no longer than the limit pass through untouched.
    def test_equal_4(self):
        self.assertEqual(truncate_string(
            'A-tisket a-tasket A green and yellow basket', 43
        ), 'A-tisket a-tasket A green and yellow basket')

    def test_equal_5(self):
        self.assertEqual(truncate_string(
            'A-tisket a-tasket A green and yellow basket', 45
        ), 'A-tisket a-tasket A green and yellow basket')

    def test_equal_6(self):
        self.assertEqual(truncate_string(
            'Chingel loves his Angel so much!!!', 27
        ), 'Chingel loves his Angel ...')

    # Very short limits still get the full ellipsis appended.
    def test_equal_7(self):
        self.assertEqual(truncate_string('A-', 1), 'A...')

    def test_equal_8(self):
        self.assertEqual(truncate_string('Absolutely Longer', 2), 'Ab...')

    def test_equal_9(self):
        self.assertEqual(truncate_string('I like ice-cream.Do you?', 19),
                         'I like ice-cream...')

    def test_equal_10(self):
        self.assertEqual(truncate_string(
            'Seems like you have passed the final test. Congratulations', 53
        ), 'Seems like you have passed the final test. Congrat...')
| {
"content_hash": "ae79b1d6f8b0ead08c32fcbaac04dccf",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6110436893203883,
"repo_name": "the-zebulan/CodeWars",
"id": "f9c468afa360323808a76bf113452e245cd98a25",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kyu_7_tests/test_truncate_a_string.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
import copy
import testtools
from testtools.matchers import HasLength
from ironicclient.tests import utils
import ironicclient.v1.chassis
# Canned chassis/node payloads shared by the fake API responses below.
CHASSIS = {'id': 42,
           'uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
           'extra': {},
           'description': 'data-center-1-chassis'}

CHASSIS2 = {'id': 43,
            'uuid': 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa',
            'extra': {},
            'description': 'data-center-1-chassis'}

NODE = {'id': 123,
        'uuid': '66666666-7777-8888-9999-000000000000',
        'chassis_id': 42,
        'driver': 'fake',
        'driver_info': {'user': 'foo', 'password': 'bar'},
        'properties': {'num_cpu': 4},
        'extra': {}}

# The POST body carries no server-generated fields.
CREATE_CHASSIS = copy.deepcopy(CHASSIS)
del CREATE_CHASSIS['id']
del CREATE_CHASSIS['uuid']

UPDATED_CHASSIS = copy.deepcopy(CHASSIS)
NEW_DESCR = 'new-description'
UPDATED_CHASSIS['description'] = NEW_DESCR

# Maps URL -> {HTTP method: (headers, body)} for the FakeAPI stub.
fake_responses = {
    '/v1/chassis':
    {
        'GET': (
            {},
            {"chassis": [CHASSIS]},
        ),
        'POST': (
            {},
            CREATE_CHASSIS,
        ),
    },
    '/v1/chassis/%s' % CHASSIS['uuid']:
    {
        'GET': (
            {},
            CHASSIS,
        ),
        'DELETE': (
            {},
            None,
        ),
        'PATCH': (
            {},
            UPDATED_CHASSIS,
        ),
    },
    '/v1/chassis/%s/nodes' % CHASSIS['uuid']:
    {
        'GET': (
            {},
            {"nodes": [NODE]},
        ),
    },
}

# Variants exercising the limit/marker pagination query parameters.
fake_responses_pagination = {
    '/v1/chassis':
    {
        'GET': (
            {},
            {"chassis": [CHASSIS],
             "next": "http://127.0.0.1:6385/v1/chassis/?limit=1"}
        ),
    },
    '/v1/chassis/?limit=1':
    {
        'GET': (
            {},
            {"chassis": [CHASSIS2]}
        ),
    },
    '/v1/chassis/?marker=%s' % CHASSIS['uuid']:
    {
        'GET': (
            {},
            {"chassis": [CHASSIS2]}
        ),
    },
    '/v1/chassis/%s/nodes?limit=1' % CHASSIS['uuid']:
    {
        'GET': (
            {},
            {"nodes": [NODE]},
        ),
    },
    '/v1/chassis/%s/nodes?marker=%s' % (CHASSIS['uuid'], NODE['uuid']):
    {
        'GET': (
            {},
            {"nodes": [NODE]},
        ),
    },
}
class ChassisManagerTest(testtools.TestCase):
    """Tests for ChassisManager against a canned FakeAPI: every test
    asserts both the HTTP calls made and the objects returned."""

    def setUp(self):
        super(ChassisManagerTest, self).setUp()
        self.api = utils.FakeAPI(fake_responses)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)

    def test_chassis_list(self):
        chassis = self.mgr.list()
        expect = [
            ('GET', '/v1/chassis', {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertEqual(1, len(chassis))

    def test_chassis_list_limit(self):
        # Pagination tests swap in the pagination response fixtures.
        self.api = utils.FakeAPI(fake_responses_pagination)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)
        chassis = self.mgr.list(limit=1)
        expect = [
            ('GET', '/v1/chassis/?limit=1', {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertThat(chassis, HasLength(1))

    def test_chassis_list_marker(self):
        self.api = utils.FakeAPI(fake_responses_pagination)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)
        chassis = self.mgr.list(marker=CHASSIS['uuid'])
        expect = [
            ('GET', '/v1/chassis/?marker=%s' % CHASSIS['uuid'], {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertThat(chassis, HasLength(1))

    def test_chassis_list_pagination_no_limit(self):
        # limit=0 means "fetch everything", following the 'next' link.
        self.api = utils.FakeAPI(fake_responses_pagination)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)
        chassis = self.mgr.list(limit=0)
        expect = [
            ('GET', '/v1/chassis', {}, None),
            ('GET', '/v1/chassis/?limit=1', {}, None)
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertThat(chassis, HasLength(2))

    def test_chassis_show(self):
        chassis = self.mgr.get(CHASSIS['uuid'])
        expect = [
            ('GET', '/v1/chassis/%s' % CHASSIS['uuid'], {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertEqual(CHASSIS['uuid'], chassis.uuid)
        self.assertEqual(CHASSIS['description'], chassis.description)

    def test_create(self):
        chassis = self.mgr.create(**CREATE_CHASSIS)
        expect = [
            ('POST', '/v1/chassis', {}, CREATE_CHASSIS),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertTrue(chassis)

    def test_delete(self):
        chassis = self.mgr.delete(chassis_id=CHASSIS['uuid'])
        expect = [
            ('DELETE', '/v1/chassis/%s' % CHASSIS['uuid'], {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertIsNone(chassis)

    def test_update(self):
        patch = {'op': 'replace',
                 'value': NEW_DESCR,
                 'path': '/description'}
        chassis = self.mgr.update(chassis_id=CHASSIS['uuid'], patch=patch)
        expect = [
            ('PATCH', '/v1/chassis/%s' % CHASSIS['uuid'], {}, patch),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertEqual(NEW_DESCR, chassis.description)

    def test_chassis_node_list(self):
        nodes = self.mgr.list_nodes(CHASSIS['uuid'])
        expect = [
            ('GET', '/v1/chassis/%s/nodes' % CHASSIS['uuid'], {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertEqual(1, len(nodes))
        self.assertEqual(NODE['uuid'], nodes[0].uuid)

    def test_chassis_node_list_limit(self):
        self.api = utils.FakeAPI(fake_responses_pagination)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)
        nodes = self.mgr.list_nodes(CHASSIS['uuid'], limit=1)
        expect = [
            ('GET',
             '/v1/chassis/%s/nodes?limit=1' % CHASSIS['uuid'], {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertThat(nodes, HasLength(1))
        self.assertEqual(NODE['uuid'], nodes[0].uuid)

    def test_chassis_node_list_marker(self):
        self.api = utils.FakeAPI(fake_responses_pagination)
        self.mgr = ironicclient.v1.chassis.ChassisManager(self.api)
        nodes = self.mgr.list_nodes(CHASSIS['uuid'], marker=NODE['uuid'])
        expect = [
            ('GET',
             '/v1/chassis/%s/nodes?marker=%s' % (CHASSIS['uuid'],
                                                 NODE['uuid']), {}, None),
        ]
        self.assertEqual(expect, self.api.calls)
        self.assertThat(nodes, HasLength(1))
        self.assertEqual(NODE['uuid'], nodes[0].uuid)
| {
"content_hash": "73474a5b2b1d5390e0a7ccf0a8b6aded",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 74,
"avg_line_length": 29.800884955752213,
"alnum_prop": 0.5232368225686711,
"repo_name": "CiscoUcs/UCS-python-ironicclinet",
"id": "75acb8beafd602323dfa820644c8cf894ce47825",
"size": "7389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironicclient/tests/v1/test_chassis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "243186"
}
],
"symlink_target": ""
} |
import ConfigParser
import os
# Package root: the parent of this module's directory, normalised.
pkg_dir = os.path.dirname(__file__)
pkg_dir = os.path.join(pkg_dir, os.path.pardir)
pkg_dir = os.path.normpath(pkg_dir)

# Default configuration lives at <pkg_dir>/etc/inspector.conf.
_default_config_path = os.path.join(pkg_dir, 'etc')
_inspector_default_config_name = 'inspector.conf'
_default_config = os.path.join(
    _default_config_path,
    _inspector_default_config_name
)
class ConfigFileBase(object):
    """Small convenience wrapper over ConfigParser: reads one config file
    and exposes lookups scoped to a single section."""

    # Words accepted as a boolean "true" (case-insensitive).
    _truth = ['true', '1', 't', 'y', 'yes', 'yeah', 'yup',
              'certainly', 'uh-huh']
    _config = None
    _section_name = None

    def __init__(self, section_name, filepath=None):
        self._section_name = section_name
        self._config = ConfigParser.ConfigParser()
        if filepath is None:
            source = _default_config
        else:
            source = self._ensure_abs_path(filepath)
        self._config.read(source)

    def force_reload_config(self, path):
        """Re-read the configuration from *path* (made absolute first)."""
        self._config.read(self._ensure_abs_path(path))

    @staticmethod
    def _ensure_abs_path(path):
        """Expand '~' and resolve relative paths against pkg_dir."""
        expanded = os.path.expanduser(path) if path.startswith('~') else path
        # make sure it is absolute
        if os.path.isabs(expanded):
            return expanded
        return os.path.join(pkg_dir, expanded)

    def _ensure_boolean(self, _value):
        """Return True iff *_value* is one of the accepted truthy words."""
        return _value.lower() in self._truth

    def get_value(self, key):
        """Raw string lookup of *key* within this instance's section."""
        return self._config.get(self._section_name, key)
class InspectorConfig(ConfigFileBase):
    """Accessors for the [InspectorConfig] section of *filename*."""

    def __init__(self, filename):
        super(InspectorConfig, self).__init__(
            "InspectorConfig",
            filepath=filename
        )

    def get_default_host_origin(self):
        # get path
        return self.get_value('default_hosts_origin')

    def get_default_inspection_set(self):
        return self.get_value('default_inspection_set')

    def get_logfile_path(self):
        # Log file location, normalised to an absolute path.
        return self._ensure_abs_path(self.get_value('logfile'))

    def get_default_time_format(self):
        return self.get_value('time_format')
class ResourceParsersConfig(ConfigFileBase):
    """Accessors for the [ResourceParsers] section (parser filenames)."""
    def __init__(self):
        super(ResourceParsersConfig, self).__init__("ResourceParsers")

    def get_proc_parser_filename(self):
        return self.get_value("proc")

    def get_config_parser_filename(self):
        return self.get_value("config")

    def get_file_parser_filename(self):
        return self.get_value("file")
class DBConfig(ConfigFileBase):
    """Accessors for the [db] section."""
    def __init__(self):
        super(DBConfig, self).__init__("db")

    def get_db_filename(self):
        # Database file path, normalised to absolute.
        _file = self.get_value("db_file")
        return self._ensure_abs_path(_file)
class SSHConfig(ConfigFileBase):
    """Accessors for the [ssh] section."""
    def __init__(self):
        super(SSHConfig, self).__init__("ssh")

    def get_options(self):
        return self.get_value("default_options")
# NOTE(review): duplicates _default_config computed above - consider
# reusing that constant instead.
_default_config_file = os.path.join(
    _default_config_path,
    _inspector_default_config_name
)

# Module-level singletons: the config files are parsed at import time.
inspector_config = InspectorConfig(_default_config_file)
resource_parsers_config = ResourceParsersConfig()
db_config = DBConfig()
ssh_config = SSHConfig()
| {
"content_hash": "c9e2dbfde8512d41c05f2e9b6111205f",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 70,
"avg_line_length": 26.728813559322035,
"alnum_prop": 0.6176284083703234,
"repo_name": "savex/spectra",
"id": "695c993d7828ace01e3b8065dd98f20aacb2bf71",
"size": "3154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectra/utils/config_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25427"
}
],
"symlink_target": ""
} |
"""
"""
import unittest
from oandapy.api.oanda_base import Core
class TestCore(unittest.TestCase):
    """Unit tests for the Core API wrapper."""

    def setUp(self):
        # Keep the instance for the tests; the original built it,
        # asserted on it, and then discarded it.
        self.obj = Core(environment="practice", access_token="token")
        self.assertIsNotNone(self.obj)

    def tearDown(self):
        pass

    def test_name(self):
        # TODO: placeholder - no behaviour is asserted yet.
        pass
| {
"content_hash": "c5d4e89557437e7290dc8079012dd7ac",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 15.857142857142858,
"alnum_prop": 0.6216216216216216,
"repo_name": "gustavooferreira/oandaApi",
"id": "11e644546e0cb6099d06d90eb34c855d93280a03",
"size": "375",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api/test_oanda_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1087"
},
{
"name": "Python",
"bytes": "83627"
}
],
"symlink_target": ""
} |
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re
import yaml
VERSION = '0.3.0pre'


def tfstates(root=None):
    """Yield the path of every ``.tfstate`` file at or below *root*
    (defaults to the current working directory)."""
    top = root or os.getcwd()
    for dirpath, _, filenames in os.walk(top):
        for filename in filenames:
            if os.path.splitext(filename)[-1] == '.tfstate':
                yield os.path.join(dirpath, filename)
def iterresources(filenames):
    """Yield (module_name, resource_key, resource) for every resource in
    the given terraform state files."""
    for filename in filenames:
        with open(filename, 'r') as handle:
            state = json.load(handle)
        for module in state['modules']:
            # The module name is the last component of its path.
            module_name = module['path'][-1]
            for resource_key, resource in module['resources'].items():
                yield module_name, resource_key, resource
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
return re.sub('[^\w_\-]', '-', dcname)
def iterhosts(resources):
    '''yield host tuples of (name, attributes, groups)'''
    for module_name, key, resource in resources:
        resource_type, _ = key.split('.', 1)
        parser = PARSERS.get(resource_type)
        if parser is None:
            # No parser registered for this resource type - skip it.
            continue
        yield parser(resource, module_name)
def parses(prefix):
    """Decorator factory: register the decorated parser in PARSERS under
    *prefix* (the terraform resource type)."""
    def register(func):
        PARSERS[prefix] = func
        return func
    return register
def calculate_mantl_vars(func):
    """calculate Mantl vars"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        name, attrs, groups = func(*args, **kwargs)
        # attrs: control nodes double as Consul servers.
        attrs['consul_is_server'] = attrs.get('role', '') == 'control'
        # groups
        if attrs.get('publicly_routable', False):
            groups.append('publicly_routable')
        return name, attrs, groups
    return wrapper
def _get_ignore_blank(obj, key, default=None):
"""
Get a key in an object, but treat blank string as missing value.
"""
v = obj.get(key, default)
if v == "":
return default
return v
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
    """Group '<prefix><sep><idx><sep><key>' entries of *source* into one
    dict per index.

    Returns a list rather than a bare ``dict.values()`` view: on Python 3
    a values view is not indexable, and callers in this file index the
    result (e.g. ``interfaces[0]`` in gce_host), which previously raised
    TypeError. A list restores the Python 2 behavior.
    """
    attrs = defaultdict(dict)
    for compkey, value in _parse_prefix(source, prefix, sep):
        idx, key = compkey.split(sep, 1)
        attrs[idx][key] = value
    return list(attrs.values())
def parse_dict(source, prefix, sep='.'):
    """Collect '<prefix><sep><suffix>' entries of *source* into a dict
    keyed by the suffix."""
    pairs = _parse_prefix(source, prefix, sep)
    return {suffix: value for suffix, value in pairs}
def parse_list(source, prefix, sep='.'):
    """List of values for all '<prefix><sep><suffix>' keys of *source*."""
    return list(value for _, value in _parse_prefix(source, prefix, sep))
def parse_bool(string_form):
    """Convert Terraform's stringified booleans ('true'/'false', any case)
    to a Python bool; raise ValueError for anything else."""
    # Only the first character matters: 't...' is True, 'f...' is False.
    first = string_form.lower()[0]
    if first == 't':
        return True
    if first == 'f':
        return False
    raise ValueError('could not convert %r to a bool' % string_form)
@parses('triton_machine')
@calculate_mantl_vars
def triton_machine(resource, module_name):
    """Parse a Joyent Triton machine resource into (name, attrs, groups)."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs.get('name')
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'dataset': raw_attrs['dataset'],
        'disk': raw_attrs['disk'],
        'firewall_enabled': parse_bool(raw_attrs['firewall_enabled']),
        'image': raw_attrs['image'],
        'ips': parse_list(raw_attrs, 'ips'),
        'memory': raw_attrs['memory'],
        'name': raw_attrs['name'],
        'networks': parse_list(raw_attrs, 'networks'),
        'package': raw_attrs['package'],
        'primary_ip': raw_attrs['primaryip'],
        'root_authorized_keys': raw_attrs['root_authorized_keys'],
        'state': raw_attrs['state'],
        'tags': parse_dict(raw_attrs, 'tags'),
        'type': raw_attrs['type'],
        'user_data': raw_attrs['user_data'],
        'user_script': raw_attrs['user_script'],
        # ansible
        'ansible_ssh_host': raw_attrs['primaryip'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root', # it's "root" on Triton by default
        # generic
        'public_ipv4': raw_attrs['primaryip'],
        'provider': 'triton',
    }
    # private IPv4: pick the first private-looking address, else fall back
    # to the public one.
    # NOTE(review): startswith('10') also matches non-RFC1918 ranges such
    # as 100.x.x.x - confirm this heuristic is intended.
    for ip in attrs['ips']:
        if ip.startswith('10') or ip.startswith('192.168'): # private IPs
            attrs['private_ipv4'] = ip
            break
    if 'private_ipv4' not in attrs:
        attrs['private_ipv4'] = attrs['public_ipv4']
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', 'none')),
        'role': attrs['tags'].get('role', 'none'),
        'ansible_python_interpreter': attrs['tags'].get('python_bin', 'python')
    })
    # add groups based on attrs
    groups.append('triton_image=' + attrs['image'])
    groups.append('triton_package=' + attrs['package'])
    groups.append('triton_state=' + attrs['state'])
    groups.append('triton_firewall_enabled=%s' % attrs['firewall_enabled'])
    groups.extend('triton_tags_%s=%s' % item
                  for item in attrs['tags'].items())
    groups.extend('triton_network=' + network
                  for network in attrs['networks'])
    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('digitalocean_droplet')
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):
    """Parse a digitalocean_droplet resource into (name, attrs, groups).

    NOTE(review): a second digitalocean_host definition later in this file
    re-registers 'digitalocean_droplet' and replaces this parser.
    """
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        'locked': parse_bool(raw_attrs['locked']),
        # assumes user_data holds a JSON document - TODO confirm
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'size': raw_attrs['size'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'status': raw_attrs['status'],
        # ansible
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root', # it's always "root" on DO
        # generic
        'public_ipv4': raw_attrs['ipv4_address'],
        # fall back to the public address when no private one is present
        'private_ipv4': raw_attrs.get('ipv4_address_private',
                                      raw_attrs['ipv4_address']),
        'provider': 'digitalocean',
    }
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
    })
    # add groups based on attrs
    groups.append('do_image=' + attrs['image'])
    groups.append('do_locked=%s' % attrs['locked'])
    groups.append('do_region=' + attrs['region'])
    groups.append('do_size=' + attrs['size'])
    groups.append('do_status=' + attrs['status'])
    groups.extend('do_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('softlayer_virtualserver')
@calculate_mantl_vars
def softlayer_host(resource, module_name):
    """Parse a softlayer_virtualserver resource into (name, attrs, groups)."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ipv4_address': raw_attrs['ipv4_address'],
        # assumes user_data holds a JSON document - TODO confirm
        'metadata': json.loads(raw_attrs.get('user_data', '{}')),
        'region': raw_attrs['region'],
        'ram': raw_attrs['ram'],
        'cpu': raw_attrs['cpu'],
        'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
        'public_ipv4': raw_attrs['ipv4_address'],
        # NOTE(review): unlike the DigitalOcean parser, this KeyErrors when
        # no private address is present - confirm the field is always set.
        'private_ipv4': raw_attrs['ipv4_address_private'],
        'ansible_ssh_host': raw_attrs['ipv4_address'],
        'ansible_ssh_port': 22,
        'ansible_ssh_user': 'root',
        'provider': 'softlayer',
    }
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
    })
    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
def openstack_host(resource, module_name):
    """Parse an openstack_compute_instance_v2 resource into
    (name, attrs, groups)."""
    raw_attrs = resource['primary']['attributes']
    name = raw_attrs['name']
    groups = []
    attrs = {
        'access_ip_v4': raw_attrs['access_ip_v4'],
        'access_ip_v6': raw_attrs['access_ip_v6'],
        'flavor': parse_dict(raw_attrs, 'flavor',
                             sep='_'),
        'id': raw_attrs['id'],
        'image': parse_dict(raw_attrs, 'image',
                            sep='_'),
        'key_pair': raw_attrs['key_pair'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'region': raw_attrs.get('region', ''),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        # ansible
        'ansible_ssh_port': 22,
        # workaround for an OpenStack bug where hosts have a different domain
        # after they're restarted
        'host_domain': 'novalocal',
        'use_host_domain': True,
        # generic
        'public_ipv4': raw_attrs['access_ip_v4'],
        'private_ipv4': raw_attrs['access_ip_v4'],
        'provider': 'openstack',
    }
    # With a floating IP the access address is public; use the first fixed
    # address of network 0 as the private one instead.
    if 'floating_ip' in raw_attrs:
        attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
    # NOTE(review): this try/except looks dead - raw_attrs['access_ip_v4']
    # was already read above, so neither KeyError nor ValueError can be
    # raised here. Preserved as-is.
    try:
        attrs.update({
            'ansible_host': raw_attrs['access_ip_v4'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_host': '', 'publicly_routable': False})
    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
    })
    # add groups based on attrs
    groups.append('os_image=' + attrs['image']['name'])
    groups.append('os_flavor=' + attrs['flavor']['name'])
    groups.extend('os_metadata_%s=%s' % item
                  for item in attrs['metadata'].items())
    groups.append('os_region=' + attrs['region'])
    # groups specific to Mantl
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('aws_instance')
@calculate_mantl_vars
def aws_host(resource, module_name):
    """Parse an aws_instance resource into (name, attrs, groups).

    The inventory hostname comes from the instance's 'Name' tag.
    """
    name = resource['primary']['attributes']['tags.Name']
    raw_attrs = resource['primary']['attributes']
    groups = []
    attrs = {
        'ami': raw_attrs['ami'],
        'availability_zone': raw_attrs['availability_zone'],
        'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
        'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
        'ephemeral_block_device': parse_attr_list(raw_attrs,
                                                  'ephemeral_block_device'),
        'id': raw_attrs['id'],
        'key_name': raw_attrs['key_name'],
        'private': parse_dict(raw_attrs, 'private',
                              sep='_'),
        'public': parse_dict(raw_attrs, 'public',
                             sep='_'),
        'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
        'security_groups': parse_list(raw_attrs, 'security_groups'),
        'subnet': parse_dict(raw_attrs, 'subnet',
                             sep='_'),
        'tags': parse_dict(raw_attrs, 'tags'),
        'tenancy': raw_attrs['tenancy'],
        'vpc_security_group_ids': parse_list(raw_attrs,
                                             'vpc_security_group_ids'),
        # ansible-specific
        'ansible_ssh_port': 22,
        'ansible_ssh_host': raw_attrs['public_ip'],
        # generic
        'public_ipv4': raw_attrs['public_ip'],
        'private_ipv4': raw_attrs['private_ip'],
        'provider': 'aws',
    }
    # attrs specific to Ansible
    if 'tags.sshUser' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['tags.sshUser']
    # When the sshPrivateIp tag is present, SSH over the private address
    # instead of the public one.
    if 'tags.sshPrivateIp' in raw_attrs:
        attrs['ansible_ssh_host'] = raw_attrs['private_ip']
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
        'role': attrs['tags'].get('role', 'none'),
        'ansible_python_interpreter': attrs['tags'].get('python_bin','python')
    })
    # groups specific to Mantl
    groups.extend(['aws_ami=' + attrs['ami'],
                   'aws_az=' + attrs['availability_zone'],
                   'aws_key_name=' + attrs['key_name'],
                   'aws_tenancy=' + attrs['tenancy']])
    groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
    groups.extend('aws_vpc_security_group=' + group
                  for group in attrs['vpc_security_group_ids'])
    groups.extend('aws_subnet_%s=%s' % subnet
                  for subnet in attrs['subnet'].items())
    # groups specific to Mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('digitalocean_droplet')
# Fixed: the original decorator name `calculate_mi_vars` is not defined
# anywhere in this module, so importing the file raised NameError.
# `calculate_mantl_vars` (defined above) is the intended post-processor.
@calculate_mantl_vars
def digitalocean_host(resource, tfvars=None):
    """Parse a digitalocean_droplet resource into (name, attrs, groups).

    NOTE(review): this re-registers 'digitalocean_droplet' and replaces the
    earlier digitalocean_host parser defined above in this file.
    """
    raw_attrs = resource['primary']['attributes']
    groups = []
    # general attrs
    attrs = {
        'name': raw_attrs['name'],
        # safe_load: user_data is externally supplied text, and plain
        # yaml.load() can construct arbitrary Python objects from it.
        # `or {}` keeps the .get() calls below working when user_data is
        # empty (YAML parses an empty document to None).
        'metadata': yaml.safe_load(raw_attrs['user_data']) or {},
        'region': raw_attrs['region'],
        'size': raw_attrs['size'],
        # ansible
        'ansible_port': 22,
        # Could be passed from the command line via environment variable
        'ansible_user': 'root',
        'ansible_host': raw_attrs['ipv4_address'],
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
        'role': attrs['metadata'].get('role', 'none')
    })
    # groups specific to microservices-infrastructure
    name = attrs.get('name')
    groups.append('region=' + attrs['region'])
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('google_compute_instance')
@calculate_mantl_vars
def gce_host(resource, module_name):
    """Parse a google_compute_instance resource into (name, attrs, groups).

    Uses the instance id as the inventory hostname.
    """
    name = resource['primary']['id']
    raw_attrs = resource['primary']['attributes']
    groups = []
    # network interfaces
    interfaces = parse_attr_list(raw_attrs, 'network_interface')
    for interface in interfaces:
        interface['access_config'] = parse_attr_list(interface,
                                                     'access_config')
        # Fixed: deleting entries while iterating `interface.keys()` raises
        # RuntimeError ("dictionary changed size during iteration") on
        # Python 3 - iterate over a snapshot of the keys instead.
        for key in list(interface):
            if '.' in key:
                del interface[key]
    # general attrs
    attrs = {
        'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
        'disks': parse_attr_list(raw_attrs, 'disk'),
        'machine_type': raw_attrs['machine_type'],
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'network': parse_attr_list(raw_attrs, 'network'),
        'network_interface': interfaces,
        'self_link': raw_attrs['self_link'],
        'service_account': parse_attr_list(raw_attrs, 'service_account'),
        'tags': parse_list(raw_attrs, 'tags'),
        'zone': raw_attrs['zone'],
        # ansible
        'ansible_ssh_port': 22,
        'provider': 'gce',
    }
    # attrs specific to Ansible
    if 'metadata.ssh_user' in raw_attrs:
        attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
    # attrs specific to Mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
    })
    try:
        attrs.update({
            'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
            'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'] or interfaces[0]['access_config'][0]['assigned_nat_ip'],
            'private_ipv4': interfaces[0]['address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        # NOTE(review): the failure path sets 'ansible_host' while the
        # success path sets 'ansible_ssh_host' - looks inconsistent, but
        # preserved as-is; confirm against consumers before changing.
        attrs.update({'ansible_host': '', 'publicly_routable': False})
    # add groups based on attrs
    groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
    groups.append('gce_machine_type=' + attrs['machine_type'])
    groups.extend('gce_metadata_%s=%s' % (key, value)
                  for (key, value) in attrs['metadata'].items()
                  if key not in set(['sshKeys']))
    groups.extend('gce_tag=' + tag for tag in attrs['tags'])
    groups.append('gce_zone=' + attrs['zone'])
    if attrs['can_ip_forward']:
        groups.append('gce_ip_forward')
    if attrs['publicly_routable']:
        groups.append('gce_publicly_routable')
    # groups specific to Mantl
    groups.append('role=' + attrs['metadata'].get('role', 'none'))
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('azure_instance')
# Fixed: `calculate_mi_vars` is not defined anywhere in this module, so the
# original decorator raised NameError at import time. `calculate_mantl_vars`
# (defined above) is the intended post-processor.
@calculate_mantl_vars
def azure_host(resource, module_name):
    """Parse an azure_instance resource into (name, attrs, groups).

    NOTE(review): a second azure_host definition later in this file
    re-registers 'azure_instance' and replaces this parser.
    """
    name = resource['primary']['attributes']['name']
    raw_attrs = resource['primary']['attributes']
    groups = []
    attrs = {
        'automatic_updates': raw_attrs['automatic_updates'],
        'description': raw_attrs['description'],
        'hosted_service_name': raw_attrs['hosted_service_name'],
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ip_address': raw_attrs['ip_address'],
        'location': raw_attrs['location'],
        'name': raw_attrs['name'],
        'reverse_dns': raw_attrs['reverse_dns'],
        'security_group': raw_attrs['security_group'],
        'size': raw_attrs['size'],
        'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
        'subnet': raw_attrs['subnet'],
        'username': raw_attrs['username'],
        'vip_address': raw_attrs.get('vip_address'),
        'virtual_network': raw_attrs.get('virtual_network'),
        'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
        # ansible
        'ansible_port': 22,
        'ansible_user': raw_attrs['username'],
        # prefer the virtual IP when one is assigned
        'ansible_host': raw_attrs.get('vip_address', raw_attrs['ip_address']),
    }
    # attrs specific to microservices-infrastructure
    attrs.update({
        'consul_dc': attrs['location'].lower().replace(" ", "-"),
        'role': attrs['description']
    })
    # groups specific to microservices-infrastructure
    groups.extend(['azure_image=' + attrs['image'],
                   'azure_location=' + attrs['location'].lower().replace(" ", "-"),
                   'azure_username=' + attrs['username'],
                   'azure_security_group=' + attrs['security_group']])
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('vsphere_virtual_machine')
@calculate_mantl_vars
def vsphere_host(resource, module_name):
    """Parse a vsphere_virtual_machine resource into (name, attrs, groups)."""
    raw_attrs = resource['primary']['attributes']
    # network_interface.0.* keys -> the first interface's attributes.
    network_attrs = parse_dict(raw_attrs, 'network_interface')
    network = parse_dict(network_attrs, '0')
    ip_address = network.get('ipv4_address', network['ip_address'])
    name = raw_attrs['name']
    groups = []
    attrs = {
        'id': raw_attrs['id'],
        'ip_address': ip_address,
        'private_ipv4': ip_address,
        'public_ipv4': ip_address,
        'metadata': parse_dict(raw_attrs, 'custom_configuration_parameters'),
        'ansible_ssh_port': 22,
        'provider': 'vsphere',
    }
    # NOTE(review): this try/except looks dead - ip_address was already
    # computed above, so the update cannot raise. Preserved as-is.
    try:
        attrs.update({
            'ansible_ssh_host': ip_address,
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', })
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
        'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
    })
    # attrs specific to Ansible
    if 'ssh_user' in attrs['metadata']:
        attrs['ansible_ssh_user'] = attrs['metadata']['ssh_user']
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('azurerm_virtual_machine')
@calculate_mantl_vars
def azurerm_host(resource, module_name):
    """Parse an azurerm_virtual_machine resource into (name, attrs, groups).

    SSH connection details come from the 'ssh_user'/'ssh_ip' tags and
    default to empty strings when absent.
    """
    raw_attrs = resource['primary']['attributes']
    host_name = raw_attrs['name']
    attrs = {
        'id': raw_attrs['id'],
        'name': host_name,
        # ansible
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs.get('tags.ssh_user', ''),
        'ansible_ssh_host': raw_attrs.get('tags.ssh_ip', ''),
    }
    groups = ['role=' + raw_attrs.get('tags.role', '')]
    return host_name, attrs, groups
@parses('azure_instance')
@calculate_mantl_vars
def azure_host(resource, module_name):
    """Parse an azure_instance resource into (name, attrs, groups).

    NOTE(review): this re-registers 'azure_instance' and replaces the
    earlier azure_host definition in this file. Unlike that version it
    reads 'vip_address'/'virtual_network' with direct indexing, so it
    KeyErrors when those attributes are absent - confirm intended.
    """
    name = resource['primary']['attributes']['name']
    raw_attrs = resource['primary']['attributes']
    groups = []
    attrs = {
        'automatic_updates': raw_attrs['automatic_updates'],
        'description': raw_attrs['description'],
        'hosted_service_name': raw_attrs['hosted_service_name'],
        'id': raw_attrs['id'],
        'image': raw_attrs['image'],
        'ip_address': raw_attrs['ip_address'],
        'location': raw_attrs['location'],
        'name': raw_attrs['name'],
        'reverse_dns': raw_attrs['reverse_dns'],
        'security_group': raw_attrs['security_group'],
        'size': raw_attrs['size'],
        'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'],
        'subnet': raw_attrs['subnet'],
        'username': raw_attrs['username'],
        'vip_address': raw_attrs['vip_address'],
        'virtual_network': raw_attrs['virtual_network'],
        'endpoint': parse_attr_list(raw_attrs, 'endpoint'),
        # ansible
        'ansible_ssh_port': 22,
        'ansible_ssh_user': raw_attrs['username'],
        'ansible_ssh_host': raw_attrs['vip_address'],
    }
    # attrs specific to mantl
    attrs.update({
        'consul_dc': attrs['location'].lower().replace(" ", "-"),
        'role': attrs['description']
    })
    # groups specific to mantl
    groups.extend(['azure_image=' + attrs['image'],
                   'azure_location=' + attrs['location'].lower().replace(" ", "-"),
                   'azure_username=' + attrs['username'],
                   'azure_security_group=' + attrs['security_group']])
    # groups specific to mantl
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('clc_server')
@calculate_mantl_vars
def clc_server(resource, module_name):
    """Parse a CenturyLink Cloud clc_server resource into
    (name, attrs, groups).

    Hosts without a public address fall back to SSH over the private one.
    """
    raw_attrs = resource['primary']['attributes']
    # The server id doubles as the inventory hostname.
    name = raw_attrs.get('id')
    groups = []
    md = parse_dict(raw_attrs, 'metadata')
    attrs = {
        'metadata': md,
        'ansible_ssh_port': md.get('ssh_port', 22),
        'ansible_ssh_user': md.get('ssh_user', 'root'),
        'provider': 'clc',
        'publicly_routable': False,
    }
    try:
        attrs.update({
            'public_ipv4': raw_attrs['public_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
            'ansible_ssh_host': raw_attrs['public_ip_address'],
            'publicly_routable': True,
        })
    except (KeyError, ValueError):
        # NOTE(review): this fallback itself KeyErrors when
        # 'private_ip_address' is also missing - confirm it is always set.
        attrs.update({
            'ansible_ssh_host': raw_attrs['private_ip_address'],
            'private_ipv4': raw_attrs['private_ip_address'],
        })
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })
    groups.append('role=' + attrs['role'])
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
@parses('ucs_service_profile')
@calculate_mantl_vars
def ucs_host(resource, module_name):
    """Parse a Cisco UCS ucs_service_profile resource into
    (name, attrs, groups)."""
    name = resource['primary']['id']
    raw_attrs = resource['primary']['attributes']
    groups = []
    # general attrs
    attrs = {
        'metadata': parse_dict(raw_attrs, 'metadata'),
        'provider': 'ucs',
    }
    # attrs specific to mantl
    attrs.update({
        'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
        'role': attrs['metadata'].get('role', 'none'),
    })
    # All three addresses come from the first vNIC.
    try:
        attrs.update({
            'ansible_ssh_host': raw_attrs['vNIC.0.ip'],
            'public_ipv4': raw_attrs['vNIC.0.ip'],
            'private_ipv4': raw_attrs['vNIC.0.ip']
        })
    except (KeyError, ValueError):
        attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
    # add groups based on attrs
    groups.append('role=' + attrs['role']) #.get('role', 'none'))
    # groups.append('all:children')
    groups.append('dc=' + attrs['consul_dc'])
    return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
    """Return the attrs dict of the host named *target*, or {} if absent."""
    matches = (attrs for name, attrs, _ in hosts if name == target)
    return next(matches, {})
def query_list(hosts):
    """Build the Ansible dynamic-inventory dict: one entry per group plus
    a '_meta.hostvars' mapping of per-host attributes."""
    groups = defaultdict(dict)
    meta = {}
    for name, attrs, hostgroups in hosts:
        # set() drops duplicate group memberships for the same host.
        for group in set(hostgroups):
            groups[group].setdefault('hosts', []).append(name)
        meta[name] = attrs
    groups['_meta'] = {'hostvars': meta}
    return groups
def query_hostfile(hosts):
    """Render the hosts as an /etc/hosts-style snippet (ip<TAB>name)."""
    lines = ['## begin hosts generated by terraform.py ##']
    for name, attrs, _ in hosts:
        # Pad the address to 16 columns so names line up.
        lines.append('{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name))
    lines.append('## end hosts generated by terraform.py ##')
    return '\n'.join(lines)
def main():
    """Command-line entry point for the dynamic inventory script."""
    # NOTE(review): __doc__ is passed positionally as argparse's `usage`
    # argument, not `description` - confirm intended.
    parser = argparse.ArgumentParser(
        __file__, __doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    # Exactly one mode must be chosen.
    modes = parser.add_mutually_exclusive_group(required=True)
    modes.add_argument('--list',
                       action='store_true',
                       help='list all variables')
    modes.add_argument('--host', help='list variables for a single host')
    modes.add_argument('--version',
                       action='store_true',
                       help='print version and exit')
    modes.add_argument('--hostfile',
                       action='store_true',
                       help='print hosts as a /etc/hosts snippet')
    parser.add_argument('--pretty',
                        action='store_true',
                        help='pretty-print output JSON')
    parser.add_argument('--nometa',
                        action='store_true',
                        help='with --list, exclude hostvars')
    # Search root: $TERRAFORM_STATE_ROOT, else two directories up from here.
    default_root = os.environ.get('TERRAFORM_STATE_ROOT',
                                  os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                               '..', '..', )))
    parser.add_argument('--root',
                        default=default_root,
                        help='custom root to search for `.tfstate`s in')
    args = parser.parse_args()
    if args.version:
        print('%s %s' % (__file__, VERSION))
        parser.exit()
    # Lazily walk tfstate files -> resources -> parsed hosts.
    hosts = iterhosts(iterresources(tfstates(args.root)))
    if args.list:
        output = query_list(hosts)
        if args.nometa:
            del output['_meta']
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.host:
        output = query_host(hosts, args.host)
        print(json.dumps(output, indent=4 if args.pretty else None))
    elif args.hostfile:
        output = query_hostfile(hosts)
        print(output)
    parser.exit()
# Standard entry point when invoked as an Ansible dynamic-inventory script.
if __name__ == '__main__':
    main()
| {
"content_hash": "73030450eff36399c92bcdd445e51a3f",
"timestamp": "",
"source": "github",
"line_count": 866,
"max_line_length": 132,
"avg_line_length": 32.90993071593533,
"alnum_prop": 0.5703157894736842,
"repo_name": "Capgemini/terraform.py",
"id": "b051d569edf83548a45fa3ac3b6c5cca4ddaa737",
"size": "29107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terraform.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "63563"
}
],
"symlink_target": ""
} |
"""Majordomo Protocol Client API, Python version.
Implements the MDP/Client spec at http://rfc.zeromq.org/spec:7.
Author: Min RK <benjaminrk@gmail.com>
Based on Java example by Arkadiusz Orzechowski
"""
import logging
import zmq
import MDP
from zhelpers import dump
class MajorDomoClient(object):
    """Majordomo Protocol Client API, Python version.
    Implements the client side of the MDP spec at
    http://rfc.zeromq.org/spec:7.
    """
    # Broker endpoint, e.g. 'tcp://localhost:5555'.
    broker = None
    # zmq.Context owning this client's sockets.
    ctx = None
    # DEALER socket connected to the broker; replaced on reconnect.
    client = None
    # zmq.Poller used to wait for replies with a timeout.
    poller = None
    # Receive timeout in milliseconds.
    timeout = 2500
    # When True, log protocol traffic.
    verbose = False
    def __init__(self, broker, verbose=False):
        self.broker = broker
        self.verbose = verbose
        self.ctx = zmq.Context()
        self.poller = zmq.Poller()
        logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                            level=logging.INFO)
        self.reconnect_to_broker()
    def reconnect_to_broker(self):
        """Connect or reconnect to broker, discarding any previous socket."""
        if self.client:
            self.poller.unregister(self.client)
            self.client.close()
        # DEALER (not REQ) so the empty delimiter frame is managed manually,
        # allowing asynchronous send/recv.
        self.client = self.ctx.socket(zmq.DEALER)
        # linger=0: drop unsent messages immediately on close.
        self.client.linger = 0
        self.client.connect(self.broker)
        self.poller.register(self.client, zmq.POLLIN)
        if self.verbose:
            logging.info("I: connecting to broker at %s...", self.broker)
    def send(self, service, request):
        """Send request frames to the broker for *service*.

        *request* may be a single frame or a list of frames.
        """
        if not isinstance(request, list):
            request = [request]
        # Prefix request with protocol frames
        # Frame 0: empty (REQ emulation)
        # Frame 1: "MDPCxy" (six bytes, MDP/Client x.y)
        # Frame 2: Service name (printable string)
        # The frame order is mandated by the MDP spec - do not reorder.
        request = ['', MDP.C_CLIENT, service] + request
        if self.verbose:
            # NOTE(review): logging.warn is a deprecated alias of
            # logging.warning.
            logging.warn("I: send request to '%s' service: ", service)
            dump(request)
        self.client.send_multipart(request)
    def recv(self):
        """Returns the reply message or None if there was no reply.

        Waits up to self.timeout milliseconds; also returns None (falling
        off the end) when the poll times out or is interrupted.
        """
        try:
            items = self.poller.poll(self.timeout)
        except KeyboardInterrupt:
            return # interrupted
        if items:
            # if we got a reply, process it
            msg = self.client.recv_multipart()
            if self.verbose:
                logging.info("I: received reply:")
                dump(msg)
            # Don't try to handle errors, just assert noisily
            # A valid reply is: empty, header, service, body frames.
            assert len(msg) >= 4
            empty = msg.pop(0)
            header = msg.pop(0)
            assert MDP.C_CLIENT == header
            service = msg.pop(0)
            # Remaining frames are the reply body.
            return msg
        else:
            logging.warn("W: permanent error, abandoning request")
| {
"content_hash": "50612dee0bf4b1e3a30e4b661778e9bf",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 90,
"avg_line_length": 30.065217391304348,
"alnum_prop": 0.5683297180043384,
"repo_name": "krattai/noo-ebs",
"id": "6c43940d663c18e9c695cdcd386c2346eb69b5f4",
"size": "2766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/zeroMQ-guide2/examples/Python/mdcliapi2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "2384"
},
{
"name": "Assembly",
"bytes": "4590201"
},
{
"name": "Awk",
"bytes": "396"
},
{
"name": "Batchfile",
"bytes": "19241"
},
{
"name": "C",
"bytes": "15563482"
},
{
"name": "C#",
"bytes": "265955"
},
{
"name": "C++",
"bytes": "691846"
},
{
"name": "CMake",
"bytes": "104078"
},
{
"name": "CSS",
"bytes": "72772"
},
{
"name": "DTrace",
"bytes": "1258"
},
{
"name": "Erlang",
"bytes": "4424888"
},
{
"name": "GAP",
"bytes": "1517"
},
{
"name": "HTML",
"bytes": "65461"
},
{
"name": "Haxe",
"bytes": "6282"
},
{
"name": "Java",
"bytes": "6899"
},
{
"name": "JavaScript",
"bytes": "494026"
},
{
"name": "Lua",
"bytes": "274783"
},
{
"name": "M4",
"bytes": "107581"
},
{
"name": "Makefile",
"bytes": "143161"
},
{
"name": "NSIS",
"bytes": "27658"
},
{
"name": "Objective-C",
"bytes": "13321"
},
{
"name": "PHP",
"bytes": "43263"
},
{
"name": "PLpgSQL",
"bytes": "80625"
},
{
"name": "Perl",
"bytes": "344546"
},
{
"name": "Python",
"bytes": "500718"
},
{
"name": "QML",
"bytes": "150"
},
{
"name": "QMake",
"bytes": "3028"
},
{
"name": "Ragel",
"bytes": "46210"
},
{
"name": "Roff",
"bytes": "120721"
},
{
"name": "Ruby",
"bytes": "121530"
},
{
"name": "Shell",
"bytes": "293349"
},
{
"name": "TeX",
"bytes": "788237"
},
{
"name": "XSLT",
"bytes": "1459"
},
{
"name": "Yacc",
"bytes": "5139"
}
],
"symlink_target": ""
} |
import unittest
from p1solution.p1solution import P1Solution
class MyTestCase(unittest.TestCase):
    """Checks both P1Solution strategies against the known answer to
    Project Euler problem 1."""

    # Sum of all multiples of 3 or 5 below 1000.
    EXPECTED_SUM = 233168

    def setUp(self):
        """Create a fresh solver for every test."""
        self.p1solution = P1Solution()

    def test_naive(self):
        self.assertEqual(self.EXPECTED_SUM, self.p1solution.naive())

    def test_optimize_using_mem(self):
        self.assertEqual(self.EXPECTED_SUM,
                         self.p1solution.optimize_using_mem())
# Allow running the tests directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6f18192dbe2fc74c70d5374e1c28c6c2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6887417218543046,
"repo_name": "yong-at-git/fun-math",
"id": "f366729311aa99ad485f02b9f01780ea44001089",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p-1-multiples-of-3-and-5/tests/test_p1solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2530"
}
],
"symlink_target": ""
} |
"""
Allows describing functions, specifically enumerating arguments which
may be passed in a combination of registers and stack values.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
from capstone import CS_GRP_CALL
from capstone import CS_GRP_INT
import pwndbg.abi
import pwndbg.arch
import pwndbg.chain
import pwndbg.color.nearpc as N
import pwndbg.constants
import pwndbg.disasm
import pwndbg.funcparser
import pwndbg.functions
import pwndbg.ida
import pwndbg.memory
import pwndbg.regs
import pwndbg.symbol
import pwndbg.typeinfo
# Map IDA-specific type names and call-convention keywords (as emitted by
# GetType()) to plain C equivalents so the declaration can be parsed by
# pwndbg.funcparser; keywords with no C counterpart map to ''.
ida_replacements = {
    '__int64': 'signed long long int',
    '__int32': 'signed int',
    '__int16': 'signed short',
    '__int8': 'signed char',
    '__uint64': 'unsigned long long int',
    '__uint32': 'unsigned int',
    '__uint16': 'unsigned short',
    '__uint8': 'unsigned char',
    '_BOOL_1': 'unsigned char',
    '_BOOL_2': 'unsigned short',
    '_BOOL_4': 'unsigned int',
    '_BYTE': 'unsigned char',
    '_WORD': 'unsigned short',
    '_DWORD': 'unsigned int',
    '_QWORD': 'unsigned long long',
    '__pure': '',
    '__hidden': '',
    '__return_ptr': '',
    '__struct_ptr': '',
    '__array_ptr': '',
    '__fastcall': '',
    '__cdecl': '',
    '__thiscall': '',
    '__userpurge': '',
}
def get_syscall_name(instruction):
    """Return 'SYS_<name>' for a syscall-type (interrupt) instruction, or
    None when the instruction is not an interrupt or the name cannot be
    resolved."""
    if CS_GRP_INT not in instruction.groups:
        return None
    # Best-effort resolution; any failure yields None.
    # Fixed: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit - restrict it to Exception.
    try:
        abi = pwndbg.abi.ABI.syscall()
        syscall = getattr(pwndbg.regs, abi.syscall_register)
        name = pwndbg.constants.syscall(syscall)
        return 'SYS_' + name
    except Exception:
        return None
def get(instruction):
    """
    Returns a list of (Argument, value) pairs for the call at $pc.

    Only meaningful when *instruction* is the instruction at $pc and is a
    'call'/'bl' or interrupt (syscall) instruction; every other case
    returns an empty list.
    """
    # Fallback argument count when no signature can be recovered.
    n_args_default = 4
    if instruction.address != pwndbg.regs.pc:
        return []
    try:
        abi = pwndbg.abi.ABI.default()
    except KeyError:
        return []
    if CS_GRP_CALL in instruction.groups:
        # Not sure of any OS which allows multiple operands on
        # a call instruction.
        assert len(instruction.operands) == 1
        target = instruction.operands[0].int
        if not target:
            return []
        name = pwndbg.symbol.get(target)
        if not name:
            return []
    elif CS_GRP_INT in instruction.groups:
        # Get the syscall number and name
        abi = pwndbg.abi.ABI.syscall()
        target = None
        syscall = getattr(pwndbg.regs, abi.syscall_register)
        name = pwndbg.constants.syscall(syscall)
    else:
        return []
    result = []
    name = name or ''
    sym = gdb.lookup_symbol(name)
    # Strip decorations so the name matches the known-signature table.
    name = name.strip().lstrip('_') # _malloc
    name = name.replace('isoc99_', '') # __isoc99_sscanf
    name = name.replace('@plt', '') # getpwiod@plt
    name = name.replace('_chk', '') # __printf_chk
    func = pwndbg.functions.functions.get(name, None)
    # Try to extract the data from GDB.
    # Note that this is currently broken, pending acceptance of
    # my patch: https://sourceware.org/ml/gdb-patches/2015-06/msg00268.html
    if sym and sym[0]:
        try:
            n_args_default = len(sym[0].type.fields())
        except TypeError:
            pass
    # Try to grab the data out of IDA
    if not func and target:
        typename = pwndbg.ida.GetType(target)
        if typename:
            typename += ';'
            # GetType() does not include the name.
            typename = typename.replace('(', ' function_name(', 1)
            # Translate IDA-specific types to plain C (see ida_replacements).
            for k, v in ida_replacements.items():
                typename = typename.replace(k, v)
            func = pwndbg.funcparser.ExtractFuncDeclFromSource(typename + ';')
    if func:
        args = func.args
    else:
        # No signature found: synthesize n generic int arguments.
        args = [pwndbg.functions.Argument('int', 0, argname(i, abi)) for i in range(n_args_default)]
    for i, arg in enumerate(args):
        result.append((arg, argument(i, abi)))
    return result
def argname(n, abi=None):
    """Name of the *n*-th call argument under *abi* (default ABI if None).

    Register arguments get the register name; the rest are 'arg[n]'.
    """
    abi = abi or pwndbg.abi.ABI.default()
    regs = abi.register_arguments
    return regs[n] if n < len(regs) else 'arg[%i]' % n
def argument(n, abi=None):
    """
    Value of the *n*-th argument as if $pc were at a 'call'/'bl'
    instruction.
    Works only for ABIs that pass leading arguments in registers; the
    remainder are read from the stack.
    """
    abi = abi or pwndbg.abi.ABI.default()
    regs = abi.register_arguments
    if n < len(regs):
        return getattr(pwndbg.regs, regs[n])
    # Stack arguments start at $sp once the registers are exhausted.
    stack_slot = pwndbg.regs.sp + (n - len(regs)) * pwndbg.arch.ptrsize
    return int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, stack_slot))
def arguments(abi=None):
    """
    Yield (arg_name, arg_value) tuples for arguments from a given ABI.
    Works only for ABIs that use registers for arguments.
    """
    abi = abi or pwndbg.abi.ABI.default()
    for i, _ in enumerate(abi.register_arguments):
        yield argname(i, abi), argument(i, abi)
def format_args(instruction):
    """Render each (argument, value) pair of *instruction* as an aligned
    'name: value' display line."""
    # Character arguments are rendered as data; everything else as code.
    return [
        '%-10s %s' % (N.argument(arg.name) + ':',
                      pwndbg.chain.format(value, code=arg.type != 'char'))
        for arg, value in get(instruction)
    ]
| {
"content_hash": "bb9a6d8a5494fe1d3ab8821397bc7b1c",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 100,
"avg_line_length": 26.38118811881188,
"alnum_prop": 0.6055545130418465,
"repo_name": "disconnect3d/pwndbg",
"id": "a3a3603a05c50eca739902982475b1950b6cbbac",
"size": "5375",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pwndbg/arguments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "584"
},
{
"name": "C",
"bytes": "113"
},
{
"name": "Makefile",
"bytes": "964"
},
{
"name": "Python",
"bytes": "1778958"
},
{
"name": "Shell",
"bytes": "4466"
}
],
"symlink_target": ""
} |
import os
import six
import json
from requests_oauthlib import OAuth1Session
# NOTE(review): hard-coded OAuth consumer credentials committed to source.
# These should be loaded from the environment or a config file, not checked in.
consumer_key = 'XJCbpn5nHHDNW48NBMx0eg'
consumer_secret = 'gcxv78Aq6kBulp663LFgug'
class Figshare(object):
    """Thin client for the figshare v1 'my_data' API, authenticated with OAuth1."""

    def __init__(self, consumer_key, consumer_secret, access_token,
                 access_token_secret):
        """
        Connects to the figshare API.
        """
        self.client = OAuth1Session(
            consumer_key, consumer_secret, access_token, access_token_secret)
        self.endpoint = 'http://api.figshare.com/v1/my_data'

    def article(self, article_id):
        """
        Returns a single article.
        """
        response = self.client.get(self.endpoint + '/articles/%s' % article_id)
        return response.json()['items'][0]

    def delete_article(self, article_id):
        """
        Deletes article `article_id`.
        """
        response = self.client.delete(
            self.endpoint + '/articles/%s' % article_id)
        return json.loads(response.content)

    def articles(self, limit=None):
        """
        Parameters
        ----------
        limit : int or None
            If not None, then stop fetching pages once at least `limit`
            articles have been collected (granularity is one API page,
            so slightly more than `limit` items may be returned).

        Returns
        -------
        Dict of {count: integer count of articles, items: dictionary
        representing each article}
        """
        # API only returns 10 results at a time, so keep asking for more pages
        # until we can't get any more...
        all_articles = []
        count = 0
        page = 1
        while True:
            # BUG FIX: this used to break when len(all_articles) < limit,
            # i.e. immediately, so any non-None limit returned zero items.
            if limit is not None and len(all_articles) >= limit:
                break
            response = self.client.get(
                self.endpoint + '/articles',
                params={'page': page}
            )
            # Keep the response around for debugging if needed; get a separate
            # results dict
            results = response.json()
            if results['count'] == 0:
                break
            all_articles.extend(results['items'])
            count += results['count']
            page += 1
        # Reconstruct the JSON dict in the same format returned by a single
        # response (with keys [count, items])
        assert count == len(all_articles)
        return {'count': count, 'items': all_articles}

    def create_article(self, title, description, defined_type='dataset'):
        """
        Create an article.
        `title`, `description` are required; other `defined_type` value can be
        "fileset". There are likely others (e.g., "figure", "code", "media",
        etc) but these are currently undocumented by the API, so use at your
        own risk.
        """
        response = self.client.post(
            self.endpoint + '/articles',
            data=json.dumps({'title': title,
                             'description': description,
                             'defined_type': defined_type,
                             }),
            headers={'content-type': 'application/json'})
        return response.json()

    def make_private(self, article_id):
        """
        Make an article private.
        If an article was just created, it is still in draft form. This method
        will turn it into a private article.
        """
        response = self.client.post(
            '%s/articles/%s/action/make_private' % (self.endpoint, article_id))
        return response.json()

    def update_article(self, article_id, title=None, description=None,
                       defined_type=None):
        """
        Update title, description, and defined_type.
        Any of these values can be None if you don't want to change them.
        """
        data = {'title': title,
                'description': description,
                'defined_type': defined_type}
        # Only send the fields the caller actually provided.
        data = dict((k, v) for k, v in data.items() if v is not None)
        response = self.client.put(
            '%s/articles/%s' % (self.endpoint, article_id),
            data=json.dumps(data),
            headers={'content-type': 'application/json'})
        return response.json()

    def upload_file(self, article_id, filepath_or_buffer):
        """
        Upload a file.
        `filepath_or_buffer` can be a string or an open file object.
        """
        if isinstance(filepath_or_buffer, six.string_types):
            file = open(filepath_or_buffer, 'rb')
            own_handle = True
        else:
            file = filepath_or_buffer
            own_handle = False
        try:
            files = {'filedata': (os.path.basename(file.name), file)}
            response = self.client.put(
                '%s/articles/%s/files' % (self.endpoint, article_id),
                files=files)
            return response.json()
        finally:
            # Only close handles we opened ourselves.
            if own_handle:
                file.close()

    def delete_file(self, article_id, file_id):
        """
        Delete a file.
        """
        response = self.client.delete(
            '%s/articles/%s/files/%s' % (self.endpoint, article_id, file_id)
        )
        return response.json()

    def add_link(self, article_id, link):
        """
        Add a link.
        """
        response = self.client.put(
            self.endpoint + '/articles/%s/links' % article_id,
            data=json.dumps({'link': link}),
            headers={'content-type': 'application/json'}
        )
        return response.json()

    def delete_link(self, article_id, link_id):
        """
        Delete a link.
        This requires a link ID, which you can get from inspecting the article
        JSON. For example::
            first_link_id = article['links'][0]['id']
        """
        response = self.client.delete(
            self.endpoint + '/articles/%s/links/%s' % (article_id, link_id)
        )
        # BUG FIX: this returned `reponse.json()` (NameError) before.
        return response.json()

    def versions(self, article_id):
        """
        Show the versions of this article
        """
        response = self.client.get(
            self.endpoint + '/articles/%s/versions' % article_id
        )
        return response.json()

    def get_version(self, article_id, version_number):
        """
        Get a particular version of this article.
        """
        response = self.client.get(
            self.endpoint + '/articles/%s/versions/%s' % (article_id, version_number)
        )
        return response.json()

    def categories(self):
        """
        Show the possible categories supplied by figshare.
        """
        # Categories live outside the my_data namespace.
        response = self.client.get(
            self.endpoint.replace('/my_data', '') + '/categories'
        )
        return response.json()

    def add_tag(self, article_id, tag):
        """
        Add a tag to an article.
        """
        response = self.client.put(
            self.endpoint + '/articles/%s/tags' % article_id,
            data=json.dumps({'tag_name': tag}),
            headers={'content-type': 'application/json'})
        return response.json()

    def delete_tag(self, article_id, tag_id):
        """
        Delete a tag from an article.
        This requires a tag ID, which you can get from inspecting the article JSON. For example::
            first_tag_id = article['tags'][0]['id']
        """
        # BUG FIX: this deleted from the /categories/ endpoint (copy-paste
        # from the category methods); use /tags/ to match add_tag.
        response = self.client.delete(
            self.endpoint + '/articles/%s/tags/%s' % (article_id, tag_id)
        )
        return response.json()

    def add_category(self, article_id, category_id):
        """
        Add a category to an article.
        See the categories() method to see the options and to select
        a `category_id`.
        """
        response = self.client.put(
            self.endpoint + '/articles/%s/categories' % article_id,
            data=json.dumps({'category_id': category_id}),
            headers={'content-type': 'application/json'})
        return response.json()

    def delete_category(self, article_id, category_id):
        """
        Delete a category from an article.
        """
        response = self.client.delete(
            self.endpoint + '/articles/%s/categories/%s' %
            (article_id, category_id)
        )
        return response.json()

    def add_author(self, article_id, author_id):
        """
        Add an author to an article.
        """
        # BUG FIX: this posted to the /categories endpoint (copy-paste from
        # add_category); authors have their own endpoint.
        response = self.client.put(
            self.endpoint + '/articles/%s/authors' % article_id,
            data=json.dumps({'author_id': author_id}),
            headers={'content-type': 'application/json'})
        return response.json()

    def delete_author(self, article_id, author_id):
        """
        Delete an author from an article
        """
        # BUG FIX: was deleting from the /categories/ endpoint.
        response = self.client.delete(
            self.endpoint + '/articles/%s/authors/%s' %
            (article_id, author_id)
        )
        return response.json()
| {
"content_hash": "7db404c5c7e61117d95acd5f81421492",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 98,
"avg_line_length": 32.494464944649444,
"alnum_prop": 0.5405405405405406,
"repo_name": "rmcgibbo/figshare",
"id": "a11683145fb86928c6eb6310f9fa0915fdff7511",
"size": "8806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "figshare/figshare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16869"
}
],
"symlink_target": ""
} |
import os
import sys
import windmill
from windmill.dep import json
import tempfile
if not sys.version.startswith('2.4'):
from urlparse import urlparse
else:
# python 2.4
from windmill.tools.urlparse_25 import urlparse
def get_save_url(suite_name, extension):
    """Build the URL under /windmill-saves/ where a saved suite is served."""
    parsed = urlparse(windmill.settings['TEST_URL'])
    return '%s://%s/windmill-saves/%s.%s' % (
        parsed.scheme, parsed.netloc, suite_name, extension)
def create_saves_path():
    """Create a temporary saves directory and serve it at /windmill-saves/.

    Side effects: registers a WSGI namespace on the windmill server, stores
    the directory path in windmill.settings['SAVES_PATH'], and schedules the
    directory for removal at framework teardown.
    """
    directory = tempfile.mkdtemp(suffix='.windmill-saves')
    # Mount the fileserver application for tests
    from windmill.dep import wsgi_fileserver
    WSGIFileServerApplication = wsgi_fileserver.WSGIFileServerApplication
    application = WSGIFileServerApplication(root_path=os.path.abspath(directory), mount_point='/windmill-saves/')
    from windmill.server import wsgi
    wsgi.add_namespace('windmill-saves', application)
    windmill.settings['SAVES_PATH'] = directory
    # Registered so the framework deletes the temp directory on shutdown.
    windmill.teardown_directories.append(directory)
def test_object_transform_to_python(test):
"""Transform test object in to controller call in python."""
params = ', '.join([key+'='+repr(value) for key, value in test['params'].items()])
return 'client.%s(%s)' % (test['method'], params)
def build_python_test_file(tests, suite_name=None):
    """Build the test file for python.

    Returns the full source of a module containing one windmill test function
    whose body is one controller call per recorded test.
    """
    # Module header plus the generated test function's signature.
    ts = '# Generated by the windmill services transformer\n'
    ts += 'from windmill.authoring import WindmillTestClient\n\n'
    # Derive the function name from the suite, stripping a leading 'test_'
    # so the generated name doesn't double the prefix.
    if suite_name:
        ts += 'def test_'+suite_name.replace('test_', '', 1)+'():\n'
    else:
        ts += 'def test():\n'
    ts += '    client = WindmillTestClient(__name__)\n\n    '
    # One indented controller call per recorded test.
    ts += '\n    '.join([test_object_transform_to_python(test) for test in tests])
    return ts
def create_python_test_file(suite_name, tests, location=None):
    """Transform and create and build the python test file.

    Writes the generated module to `location` (default: the saves directory)
    and returns the URL it is served from.
    """
    if location is None:
        location = os.path.join(windmill.settings['SAVES_PATH'], suite_name+'.py')
    # BUG FIX: the handle used to leak if build/write raised; try/finally
    # guarantees it is closed (no `with`: this file still supports python 2.4).
    f = open(location, 'w')
    try:
        f.write(build_python_test_file(tests, suite_name.split('.')[0]))
    finally:
        f.close()
    return get_save_url(suite_name, 'py')
def create_json_test_file(suite_name, tests, location=None):
    """Transform and create a json test file (one JSON object per line).

    Note: mutates each test dict in place by stripping non-API keys.
    Returns the URL the saved file is served from.
    """
    if location is None:
        location = os.path.join(windmill.settings['SAVES_PATH'], suite_name+'.json')
    # BUG FIX: the handle used to leak if serialization raised; try/finally
    # guarantees it is closed (no `with`: this file still supports python 2.4).
    f = open(location, 'w')
    try:
        for test in tests:
            # Strip keys that aren't part of the api
            test.pop('suite_name', None)
            test.pop('version', None)
            f.write(json.dumps(test))
            f.write('\n')
    finally:
        f.close()
    return get_save_url(suite_name, 'json')
def test_object_transform_to_javascript(test):
"""Transform test object in to controller call in javascript."""
test = dict([(k, v,) for k, v in test.items() if k == 'method' or k == 'params'])
return json.dumps(test)
def build_javascript_test_file(tests, suite_name=None):
    """Build the test file for javascript.

    Returns the source of a JS test object whose test_actions array holds
    one JSON action per recorded test.
    """
    ts = '// Generated by the windmill services transformer\n'
    # Derive the object name from the suite (minus extension), stripping a
    # leading 'test_' so the generated name doesn't double the prefix.
    if suite_name:
        ts += 'var test_'+suite_name.replace('test_', '', 1).split('.')[0]+' = new function() {\n'
    else:
        ts += 'var test_one = new function() {\n'
    ts += '    this.test_actions = [\n'
    # One JSON action per recorded test, comma-separated.
    ts += ',\n'.join([test_object_transform_to_javascript(test) for test in tests])
    ts += '\n    ];\n'
    ts += '}\n'
    return ts
def create_javascript_test_file(suite_name, tests, location=None):
    """Transform and create and build the javascript test file.

    Writes the generated JS to `location` (default: the saves directory)
    and returns the URL it is served from.
    """
    if location is None:
        location = os.path.join(windmill.settings['SAVES_PATH'], suite_name+'.js')
    # BUG FIX: the handle used to leak if build/write raised; try/finally
    # guarantees it is closed (no `with`: this file still supports python 2.4).
    f = open(location, 'w')
    try:
        f.write(build_javascript_test_file(tests, suite_name))
    finally:
        f.close()
    return get_save_url(suite_name, 'js')
# Maps an output-format name to the writer that persists a recorded suite in
# that format; the transform service looks up the requested format here.
registry = {'python':create_python_test_file, 'json':create_json_test_file, 'javascript': create_javascript_test_file}
| {
"content_hash": "68df20b0d51a2975c47beaeb7186cfed",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 118,
"avg_line_length": 39.73737373737374,
"alnum_prop": 0.6514997458057956,
"repo_name": "windmill/windmill",
"id": "2f1ac3f1e187977708f12db16c5b3649bf1bc47a",
"size": "4676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "windmill/authoring/transforms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "76128"
},
{
"name": "CSS",
"bytes": "113500"
},
{
"name": "HTML",
"bytes": "226277"
},
{
"name": "JavaScript",
"bytes": "1065858"
},
{
"name": "Makefile",
"bytes": "2367"
},
{
"name": "PHP",
"bytes": "4708"
},
{
"name": "Python",
"bytes": "575202"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
import os
import unittest
from tslib.readers import PiXmlReader
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class TestPiXmlReader(unittest.TestCase):
    """Smoke tests for PiXmlReader.get_series() and get_tz()."""

    @staticmethod
    def _reader(filename):
        """Return a PiXmlReader over a fixture file in DATA_DIR."""
        return PiXmlReader(os.path.join(DATA_DIR, filename))

    def test_parse_pi_xml_01(self):
        """Parse a file."""
        for metadata, dataframe in self._reader("time_series.xml").get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_02(self):
        """Parse a file having comment elements."""
        for metadata, dataframe in self._reader("GDresults_dam.xml").get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_03(self):
        """Parse a file with timeZone element."""
        self.assertEqual(1.0, self._reader("time_series.xml").get_tz())

    def test_parse_pi_xml_04(self):
        """Parse a file with empty timeZone element."""
        self.assertEqual(0.0, self._reader("empty_tz.xml").get_tz())

    def test_parse_pi_xml_05(self):
        """Parse a file without timeZone element."""
        self.assertEqual(None, self._reader("no_tz.xml").get_tz())

    def test_parse_pi_xml_06(self):
        """Parse a file without events ."""
        for metadata, dataframe in self._reader("no_events.xml").get_series():
            self.assertEqual(None, dataframe)

    def test_parse_pi_xml_07(self):
        """Parse a file."""
        for metadata, dataframe in self._reader("time_series.xml").get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_08(self):
        """Parse a file having comment elements."""
        for metadata, dataframe in self._reader("GDresults_dam.xml").get_series():
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_09(self):
        """Parse a file without events ."""
        for metadata, dataframe in self._reader("no_events.xml").get_series():
            self.assertEqual(None, dataframe)
class BulkTestPiXmlReader(unittest.TestCase):
    """Smoke tests for PiXmlReader.bulk_get_series() with various chunk sizes."""

    @staticmethod
    def _reader(filename):
        """Return a PiXmlReader over a fixture file in DATA_DIR."""
        return PiXmlReader(os.path.join(DATA_DIR, filename))

    def test_parse_pi_xml_01(self):
        """Parse a file."""
        for metadata, dataframe in self._reader("time_series.xml").bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_02(self):
        """Parse a file having comment elements."""
        for metadata, dataframe in self._reader("GDresults_dam.xml").bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_03(self):
        """Parse a file with timeZone element."""
        self.assertEqual(1.0, self._reader("time_series.xml").get_tz())

    def test_parse_pi_xml_06(self):
        """Parse a file without events ."""
        for metadata, dataframe in self._reader("no_events.xml").bulk_get_series(chunk_size=5):
            self.assertEqual(None, dataframe)

    def test_parse_pi_xml_07(self):
        """Parse a file."""
        # Chunk larger than the file: everything arrives in one batch.
        for metadata, dataframe in self._reader("time_series.xml").bulk_get_series(chunk_size=300):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_08(self):
        """Parse a file having comment elements."""
        for metadata, dataframe in self._reader("GDresults_dam.xml").bulk_get_series(chunk_size=5):
            pass
        self.assertTrue(True)

    def test_parse_pi_xml_09(self):
        """Parse a file without events ."""
        for metadata, dataframe in self._reader("no_events.xml").bulk_get_series(chunk_size=5):
            self.assertEqual(None, dataframe)
| {
"content_hash": "44f6a4bf578e6d2675cb30901db984bb",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 61,
"avg_line_length": 33.68181818181818,
"alnum_prop": 0.5883940620782726,
"repo_name": "nens/tslib",
"id": "d82d73648eecdf614cc2a52e1476871c354a96be",
"size": "4446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tslib/readers/tests/pi_xml_reader_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30541"
}
],
"symlink_target": ""
} |
from st2common.models.api.action import ActionAliasAPI
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
TEST_LOAD_MODELS = {
'aliases': ['alias3.yaml']
}
GENERIC_FIXTURES_PACK = 'generic'
TEST_LOAD_MODELS_GENERIC = {
'aliases': ['alias3.yaml']
}
class TestActionAlias(FunctionalTest):
    """API tests for the /v1/actionalias endpoints (CRUD, match, help)."""
    # Populated once in setUpClass and shared by every test method.
    models = None
    alias1 = None
    alias2 = None
    alias3 = None
    alias3_generic = None

    @classmethod
    def setUpClass(cls):
        super(TestActionAlias, cls).setUpClass()
        # alias1/alias2 are persisted to the DB so GET tests can see them;
        # the alias3 variants are only loaded in memory so POST tests can
        # create (and later delete) them through the API.
        cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
                                                          fixtures_dict=TEST_MODELS)
        cls.alias1 = cls.models['aliases']['alias1.yaml']
        cls.alias2 = cls.models['aliases']['alias2.yaml']
        loaded_models = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
                                                    fixtures_dict=TEST_LOAD_MODELS)
        cls.alias3 = loaded_models['aliases']['alias3.yaml']
        loaded_models = FixturesLoader().load_models(fixtures_pack=GENERIC_FIXTURES_PACK,
                                                    fixtures_dict=TEST_LOAD_MODELS_GENERIC)
        cls.alias3_generic = loaded_models['aliases']['alias3.yaml']

    def test_get_all(self):
        # Only the two DB-persisted aliases should be listed.
        resp = self.app.get('/v1/actionalias')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 2, '/v1/actionalias did not return all aliases.')
        retrieved_names = [alias['name'] for alias in resp.json]
        self.assertEqual(retrieved_names, [self.alias1.name, self.alias2.name],
                         'Incorrect aliases retrieved.')

    def test_get_one(self):
        resp = self.app.get('/v1/actionalias/%s' % self.alias1.id)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.json['name'], self.alias1.name,
                         'Incorrect aliases retrieved.')

    def test_post_delete(self):
        # Create, fetch, delete, then confirm the alias is gone (404).
        post_resp = self._do_post(vars(ActionAliasAPI.from_model(self.alias3)))
        self.assertEqual(post_resp.status_int, 201)
        get_resp = self.app.get('/v1/actionalias/%s' % post_resp.json['id'])
        self.assertEqual(get_resp.status_int, 200)
        self.assertEqual(get_resp.json['name'], self.alias3.name,
                         'Incorrect aliases retrieved.')
        del_resp = self.__do_delete(post_resp.json['id'])
        self.assertEqual(del_resp.status_int, 204)
        get_resp = self.app.get('/v1/actionalias/%s' % post_resp.json['id'], expect_errors=True)
        self.assertEqual(get_resp.status_int, 404)

    def test_post_dup_name(self):
        # Same alias name in two different packs must both be accepted.
        post_resp = self._do_post(vars(ActionAliasAPI.from_model(self.alias3)))
        self.assertEqual(post_resp.status_int, 201)
        post_resp_dup_name = self._do_post(vars(ActionAliasAPI.from_model(self.alias3_generic)))
        self.assertEqual(post_resp_dup_name.status_int, 201)
        self.__do_delete(post_resp.json['id'])
        self.__do_delete(post_resp_dup_name.json['id'])

    def test_match(self):
        # A command matching no alias pattern is a 400 with an explanation.
        data = {'command': 'hello donny'}
        resp = self.app.post_json("/v1/actionalias/match", data,
                                  expect_errors=True)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(str(resp.json['faultstring']), "Command 'hello donny' matched no patterns")
        # A command matching more than one pattern is also a 400.
        data = {'command': 'Lorem ipsum banana dolor sit pineapple amet.'}
        resp = self.app.post_json("/v1/actionalias/match", data, expect_errors=True)
        self.assertEqual(resp.status_int, 400)
        self.assertEqual(str(resp.json['faultstring']),
                         "Command 'Lorem ipsum banana dolor sit pineapple amet.' "
                         "matched more than 1 pattern")

    def test_help(self):
        data = {}
        resp = self.app.post_json("/v1/actionalias/help", data, expect_errors=False)
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(resp.json.get('available'), 2)

    def test_help_args(self):
        # Filter/pagination args: 'available' reports the total, while the
        # returned helpstrings honor limit/offset.
        data = {"filter": ".*", "pack": "aliases", "limit": 1, "offset": 0}
        resp = self.app.post_json("/v1/actionalias/help", data, expect_errors=False)
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(resp.json.get('available'), 2)
        self.assertEqual(len(resp.json.get('helpstrings').get("aliases")), 1)

    def _do_post(self, actionalias, expect_errors=False):
        # Helper: POST an alias payload to the API.
        return self.app.post_json('/v1/actionalias', actionalias, expect_errors=expect_errors)

    def __do_delete(self, actionalias_id, expect_errors=False):
        # Helper: DELETE an alias by id. NOTE: double underscore means this
        # name is class-private (name-mangled to _TestActionAlias__do_delete).
        return self.app.delete('/v1/actionalias/%s' % actionalias_id, expect_errors=expect_errors)
| {
"content_hash": "8bd05d96e8e659ad9a4b02a26dd59e34",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 100,
"avg_line_length": 40.92372881355932,
"alnum_prop": 0.6208324704907848,
"repo_name": "peak6/st2",
"id": "8f690a2ae517836f490a2589905f43580a8bd47f",
"size": "5609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2api/tests/unit/controllers/v1/test_action_alias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "42545"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4012891"
},
{
"name": "Shell",
"bytes": "41016"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import frappe
from frappe.tests.utils import FrappeTestCase
class TestSequence(FrappeTestCase):
    """Exercise frappe.db's database-sequence helpers."""

    def generate_sequence_name(self) -> str:
        """Return a unique sequence name scoped to the running test method."""
        return self._testMethodName + "_" + frappe.generate_hash(length=5)

    def test_set_next_val(self):
        name = self.generate_sequence_name()
        frappe.db.create_sequence(name, check_not_exists=True, temporary=True)

        # Setting the next value directly makes the following NEXTVAL return it.
        current = frappe.db.get_next_sequence_val(name)
        frappe.db.set_next_sequence_val(name, current + 1)
        self.assertEqual(current + 1, frappe.db.get_next_sequence_val(name))

        # With is_val_used=True the given value is considered consumed.
        current = frappe.db.get_next_sequence_val(name)
        frappe.db.set_next_sequence_val(name, current + 1, is_val_used=True)
        self.assertEqual(current + 2, frappe.db.get_next_sequence_val(name))

    def test_create_sequence(self):
        # A cycling sequence wraps back to 1 after exhausting max_value.
        name = self.generate_sequence_name()
        frappe.db.create_sequence(name, max_value=2, cycle=True, temporary=True)
        frappe.db.get_next_sequence_val(name)
        frappe.db.get_next_sequence_val(name)
        self.assertEqual(1, frappe.db.get_next_sequence_val(name))

        # A non-cycling sequence must raise once max_value is exhausted.
        name = self.generate_sequence_name()
        frappe.db.create_sequence(name, max_value=2, temporary=True)
        frappe.db.get_next_sequence_val(name)
        frappe.db.get_next_sequence_val(name)
        try:
            frappe.db.get_next_sequence_val(name)
        except frappe.db.SequenceGeneratorLimitExceeded:
            pass
        else:
            self.fail("NEXTVAL didn't raise any error upon sequence's end")
        # without this, we're not able to move further
        # as postgres doesn't allow moving further in a transaction
        # when an error occurs
        frappe.db.rollback()

        # min_value / increment_by control the generated series.
        name = self.generate_sequence_name()
        frappe.db.create_sequence(name, min_value=10, max_value=20, increment_by=5, temporary=True)
        for expected in (10, 15, 20):
            self.assertEqual(expected, frappe.db.get_next_sequence_val(name))
| {
"content_hash": "9d1d08d586f1cbe35fc55ae4cd006f7f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 97,
"avg_line_length": 39.857142857142854,
"alnum_prop": 0.7414234511008705,
"repo_name": "StrellaGroup/frappe",
"id": "c6ea0bc8c083a1880fc634afbe7927a54a2fb67a",
"size": "1953",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_sequence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250858"
},
{
"name": "JavaScript",
"bytes": "2515308"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3605011"
},
{
"name": "SCSS",
"bytes": "261492"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
} |
from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue
from twisted.internet import reactor, defer
import uuid
import logging
import datetime
from pytz import utc
from telephus.cassandra.ttypes import InvalidRequestException, CfDef, ColumnDef, IndexExpression, IndexOperator
import copy
import math
import decimal
import re
from netaddr.ip import IPAddress
from netaddr.strategy import ipv4
from dateutil import parser
from operator import attrgetter, itemgetter
from attributes import *
from configuration import Configuration
from query import Query, QueryResult
from base_model import BaseModel, BaseModelMeta
class RowModelMeta(BaseModelMeta):
    """Metaclass that records which declared attribute is the row key.

    Scans the model's attributes for one flagged with ``row_key`` and stores
    it on the class as ``_row_key`` = (attribute name, attribute object).
    """
    def __init__(cls, name, bases, attrs):
        super(RowModelMeta, cls).__init__(name, bases, attrs)
        for attr_name, attr in cls._attributes.items():
            if getattr(attr, 'row_key', False):
                cls._row_key = (attr_name, attr)
        # Only models bound to a column family are required to have a row key.
        if cls._row_key is None and cls.Meta.column_family:
            raise Exception('No row_key found for non-primitive model.')
class RowModel(BaseModel):
    """Base class for Cassandra-backed models keyed by a single row key.

    The metaclass records the row-key attribute in ``_row_key`` as a
    (name, attribute) pair; each instance is persisted as one Cassandra row.
    """
    __metaclass__ = RowModelMeta
    _row_key = None

    def _pre_save(self):
        """Assign a fresh time-based UUID as the row key for new instances."""
        if self._is_new:
            self._setattr(self._row_key[0], uuid.uuid1(), filter=False)

    def _mutation_map_for_save(self):
        """Build the {row_key: {column_family: {column: value}}} mutation map
        for the dirty attributes; the row key itself is not stored as a column."""
        insert_dict = {}
        for k in self._dirty:
            if k != self._row_key[0]:
                insert_dict[k] = self._getattr_for_db(k)
        row_key = self._getattr_for_db(self._row_key[0])
        mutation_map = {}
        mutation_map.update({row_key: {self.Meta.column_family: insert_dict}})
        return mutation_map

    @inlineCallbacks
    def delete(self, configuration=Configuration):
        """Remove this row from Cassandra and clear the local key/timestamps.

        The Deferred fires with False when there is nothing to delete,
        True on success.
        """
        # NOTE(review): this tests the class-level attribute *descriptor*,
        # which looks like it is never None once the metaclass runs; it
        # probably should test the instance's key value — confirm before
        # changing, behavior preserved here.
        if self._row_key[1] is None:
            returnValue(False)
            yield
        else:
            yield configuration.cassandra_client.remove(getattr(self, self._row_key[0]).bytes, self.Meta.column_family)
            self._setattr(self._row_key[0], None, filter=False)
            self._setattr('date_modified', None, filter=False)
            self._setattr('date_created', None, filter=False)
            returnValue(True)

    @classmethod
    def _result_to_instance(cls, key, result):
        """Materialize a model instance from a row key and a column slice."""
        o = cls(is_new=False)
        o._setattr(o._row_key[0], key, filter=False)
        for column in result:
            o._setattr_from_db(column.column.name, column.column.value)
        return o

    @classmethod
    def _result_to_dict(cls, key, result):
        """Convert a column slice to a plain dict keyed by attribute name,
        converting each value with the attribute's from_db_value()."""
        columns = {cls._row_key[0]: key}
        for column in result:
            attribute_name = column.column.name
            p = cls._attributes[attribute_name] if attribute_name in cls._attributes else None
            if p is None:
                raise Exception('Unknown attribute: %s' % attribute_name)
            columns[attribute_name] = p.from_db_value(column.column.value)
        return columns

    @classmethod
    @inlineCallbacks
    def get(cls, key, configuration=Configuration):
        """Fetch one instance by row key; the Deferred fires with None when
        the row does not exist."""
        # TODO: assert key is the same type as the row_key attribute /
        # support non-UUID keys.
        names = cls._attributes.keys()
        record = yield configuration.cassandra_client.get_slice(key.bytes, cls.Meta.column_family, names=names)
        if record == []:
            returnValue(None)
        o = cls._result_to_instance(key, record)
        o._post_get()
        returnValue(o)

    @classmethod
    @inlineCallbacks
    def execute_query(cls, query=None, configuration=Configuration):
        """Run a Query against the indexed column family.

        NE index expressions are evaluated client-side (Cassandra secondary
        indexes cannot express !=); sorting and offset/limit paging are also
        applied client-side. The Deferred fires with a QueryResult of
        (page of instances, total match count).
        """
        if query is None:
            raise Exception('query is None!')
        sorts = query._sorts or [cls._row_key[0]]
        offset = query._offset or 0
        limit = query._limit or 25
        preliminary_columns = []
        for x in sorts:
            preliminary_columns.append(x.lstrip('+-'))
        if len(sorts) > 1:
            raise Exception("Multiple order clauses not supported.")
        order = sorts[0]
        reverse_sort = order.startswith('-')
        order_key_name = order.lstrip('+-')
        # Pull NE expressions out for client-side filtering.
        # BUG FIX: the original deleted items from query._expressions while
        # enumerating it, which skips the element following every NE match.
        excludes = {}
        remaining_expressions = []
        for e in query._expressions:
            if e.op == IndexOperator.NE:
                excludes[e.column_name] = e.value
                preliminary_columns.append(e.column_name)
            else:
                remaining_expressions.append(e)
        # Mutate the existing list in place, preserving the original's
        # side effect on the query object.
        query._expressions[:] = remaining_expressions
        preliminary_results = yield configuration.cassandra_client.get_indexed_slices(cls.Meta.column_family, query._expressions, names=preliminary_columns, start_key='', count=configuration.count, column_count=configuration.column_count)
        preliminary_results = [cls._result_to_dict(r.key, r.columns) for r in preliminary_results]

        def check_excludes(cols):
            # Keep only rows whose excluded columns differ from the NE value.
            for k, v in excludes.items():
                p = cls._attributes[k] if k in cls._attributes else None
                if p is None:
                    raise Exception('Unknown attribute: %s' % k)
                attr = cols[k] if k in cols else None
                if attr is not None and cols[k] == v:
                    return False
            return True

        matching_results = filter(check_excludes, preliminary_results)
        sorted_matching_results = sorted(matching_results, key=itemgetter(order_key_name))
        matching_keys = [r[cls._row_key[0]] for r in sorted_matching_results]
        l = len(matching_keys)
        # Slice out the requested page of keys, honoring the sort direction.
        fetch_keys = matching_keys[offset:offset+limit] if not reverse_sort else matching_keys[l-offset-1:l-offset-limit-1:-1]
        search_results = yield configuration.cassandra_client.multiget_slice(fetch_keys, cls.Meta.column_family, count=limit)
        results = []
        for (key, columns) in search_results.items():
            results.append(cls._result_to_instance(uuid.UUID(bytes=key), columns))
        # multiget_slice does not preserve order, so re-sort the page.
        sorted_results = sorted(results, key=attrgetter(order_key_name))
        if reverse_sort:
            sorted_results.reverse()
        returnValue(QueryResult(sorted_results, l))

    def as_dict(self, properties=None):
        """Return {name: value} for *properties* (default: all attributes)."""
        if properties is None:
            properties = self._attributes.keys()
        return dict((name, getattr(self, name, None)) for name in properties)
| {
"content_hash": "3b93600ab98260b169e490ea6f342926",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 238,
"avg_line_length": 40.45762711864407,
"alnum_prop": 0.609412093283061,
"repo_name": "amorton/Polydorus",
"id": "6f3d3087a138739b69b7fde8dbc24bcdeb1691d4",
"size": "7161",
"binary": false,
"copies": "1",
"ref": "refs/heads/unstable",
"path": "polydorus/row_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49962"
}
],
"symlink_target": ""
} |
'''
Created on 2014/03/21
@author: yamashiro-r
'''
import sys
from conv.combined import Combined
from conv.dummy import Dummy
class ConverterFactory(object):
"""
Create Log Converter Factory
Created on 2014/03/21
"""
"""
Initialize
"""
def __init__(self, logFormat = ""):
self.logFormat = logFormat
print "ConvertFactory init. format is {logFormat}".format(logFormat = self.logFormat)
# self.printSysPath()
def printSysPath(self):
for line in sys.path:
print line
"""
Create Converter Object Factory
"""
def createConv(self):
print "call getConverter. logFormat is {logFormat}".format(logFormat = self.logFormat)
if self.logFormat == "combined":
return Combined()
else:
return Dummy()
| {
"content_hash": "cb9c119236d049ca66473874fa681a5e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 94,
"avg_line_length": 22.07894736842105,
"alnum_prop": 0.6090584028605482,
"repo_name": "yamashiro0110/converter",
"id": "6946bc6a386de078e4dfef936de995e444746af3",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/factory/converterFactory.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from Tkinter import *
import ttk
root = Tk()
def printt(msg):
print msg
ttk.Button(root, text="Hello World", command= lambda: printt('ping').grid())
root.mainloop()
| {
"content_hash": "129e8bacf94b01c7569dfcc1eb225283",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 19,
"alnum_prop": 0.6900584795321637,
"repo_name": "erickmusembi/Robot-Project",
"id": "4490d06ccc195e527b0ba88b9d28a12b45c1f7f0",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Robot Project/tests/hello_world.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7498"
},
{
"name": "Python",
"bytes": "70467"
}
],
"symlink_target": ""
} |
"""
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """Duplicate every write/flush to two file-like objects."""

    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2

    def write(self, data):
        # Fan the data out to both sinks, first file1 then file2.
        for sink in (self.file1, self.file2):
            sink.write(data)

    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# reST template filled via %-interpolation in generate_file_rst(); this
# variant is used for examples that produce no figures.
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""

# Variant used when the example produced figures: adds the image list, the
# captured stdout and the measured running time.
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
%(stdout)s
**Total running time of the example:** %(time_elapsed) 4i seconds
"""

# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
# Template used when the example generated exactly one image.
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
    """Extract a module-level docstring from *filename*, if any.

    Returns a tuple ``(docstring, first_par, end_row)`` where ``docstring``
    is the full module docstring ('' if none), ``first_par`` is its first
    paragraph ('' if none), and ``end_row`` is the 1-based row just past the
    docstring, i.e. where the actual code starts.
    """
    # Use a context manager so the handle is closed (the original used the
    # py2-only `file()` builtin and leaked the handle).
    with open(filename) as fh:
        lines = fh.readlines()
    start_row = 0
    # Skip a shebang line, remembering the offset for the returned row.
    # Guarding on `lines` also makes an empty file return ('', '', 1)
    # instead of raising IndexError.
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    erow = 0  # stays 0 if the file has no tokens at all
    # `next(it)` is portable across py2/py3 (the original used the py2-only
    # bound `.next` method); tokenize treats StopIteration as end-of-input.
    line_iter = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iter))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            # NOTE: eval() of the string token is acceptable only because the
            # example files are trusted, local sources.
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line in docstring.split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        # The first significant token ends the search, whether or not it was
        # a string (a module docstring can only be the first statement).
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Runs on sphinx's 'builder-inited' event (wired up in setup()): writes
    auto_examples/index.rst and one rst file per example script.
    """
    # Output goes under <srcdir>/auto_examples; the example scripts live in
    # <srcdir>/../examples.
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(app.builder.srcdir + '/..')
    example_dir = os.path.join(example_dir, 'examples')
    try:
        # plot_gallery may arrive as a string (e.g. 'True' from the sphinx
        # command line); eval turns it into the corresponding bool.
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        # Already a real bool (eval rejects non-strings with TypeError).
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    # we create an index.rst with all examples
    fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
    #Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
width: auto;
height: 200px;
width: 180px;
}
.figure img {
display: inline;
}
.figure .caption {
width: 170px;
text-align: center !important;
}
</style>
Code examples
=================
.. _examples-index:
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
    #for dir in sorted(os.listdir(example_dir)):
    #    if os.path.isdir(os.path.join(example_dir, dir)):
    #        generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
    fhindex.flush()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
    """ Generate the rst file for an example directory.

    Appends one figure/toctree entry per example script to the open index
    file `fhindex`; directories without a README.txt are skipped entirely.
    """
    if not dir == '.':
        target_dir = os.path.join(root_dir, dir)
        src_dir = os.path.join(example_dir, dir)
    else:
        # '.' means the flat top-level examples directory itself.
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        print 80 * '_'
        print ('Example directory %s does not have a README.txt file'
               % src_dir)
        print 'Skipping this directory'
        print 80 * '_'
        return
    # The README.txt content becomes the section header in the index.
    fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    def sort_key(a):
        # put last elements without a plot
        if not a.startswith('plot') and a.endswith('.py'):
            return 'zz' + a
        return a

    for fname in sorted(os.listdir(src_dir), key=sort_key):
        if fname.endswith('py'):
            generate_file_rst(fname, target_dir, src_dir, plot_gallery)
            # fname[:-3] strips the '.py' suffix.
            thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
            link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
            fhindex.write('.. figure:: %s\n' % thumb)
            if link_name.startswith('._'):
                link_name = link_name[2:]
            if dir != '.':
                fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
                              fname[:-3]))
            else:
                fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
            # Hidden toctree entry so sphinx knows about the generated page.
            fhindex.write(""" :ref:`example_%s`
.. toctree::
:hidden:
%s/%s
""" % (link_name, dir, fname[:-3]))
    fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""")  # clear at the end of the section
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.

    For 'plot*' examples (when plot_gallery is on) the script is executed,
    its stdout captured, every open matplotlib figure saved as a png and a
    thumbnail created; results are cached on disk and only regenerated when
    the source file is newer than the first image.
    """
    base_image_name = os.path.splitext(fname)[0]
    # '%%s' survives the first interpolation, leaving a '%s' slot for the
    # figure number.
    image_fname = '%s_%%s.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        # Reuse cached stdout/timing from a previous build if present.
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime <=
                os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                # Tee stdout so the output is both shown live and captured.
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                # (this requires the figures to be numbered
                # incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    fig_file_name = image_path % fig_num
                    try:
                        plt.tight_layout(pad=.5)
                    except Exception:
                        # tight_layout is present only in recent versions
                        # of matplotlib
                        pass
                    plt.savefig(fig_file_name)
                    figure_list.append(image_fname % fig_num)
            except:
                # Best-effort: a broken example must not abort the doc build.
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            # Cached images are up to date: just list them again.
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path % '[1-9]')]
                           #for f in glob.glob(image_path % '*')]
        # generate thumb file
        this_template = plot_rst_template
        from matplotlib import image
        if os.path.exists(first_image_file):
            image.thumbnail(first_image_file, thumb_file, 0.2)
    if not os.path.exists(thumb_file):
        # create something not to replace the thumbnail
        shutil.copy('images/blank_image.png', thumb_file)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    # fname[:-2] keeps the trailing '.' so appending 'rst' yields 'name.rst'.
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
def setup(app):
    """Sphinx extension entry point: wire up example-gallery generation."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')

    # Sphinx hack: sphinx copies generated images into the build directory
    # on every run.  When the desired image name already exists it appends
    # a digit rather than overwriting, and the directory is never cleared,
    # so the number of images grows with every build.
    #
    # This was raised on the sphinx development list without a response:
    # http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # As a workaround we clear the image build directory ourselves before
    # each build.  If sphinx changes its layout between versions this stops
    # working (though it should not crash).  Tested successfully on
    # Sphinx 1.0.7.
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        for entry in os.listdir(build_image_dir):
            if entry.endswith('png'):
                os.remove(os.path.join(build_image_dir, entry))
| {
"content_hash": "2efe3c7681db0706beea69d759ee76d6",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 33.25609756097561,
"alnum_prop": 0.5499083241657499,
"repo_name": "rain1024/sklearn_tutorial",
"id": "40358af3aacfcedd93230a0eabf4b94e230d24af",
"size": "13635",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/sphinxext/gen_rst.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1121"
}
],
"symlink_target": ""
} |
"""Module for optimizers in Oryx."""
from oryx.experimental.optimizers.optix import adam
from oryx.experimental.optimizers.optix import gradient_descent
from oryx.experimental.optimizers.optix import noisy_sgd
from oryx.experimental.optimizers.optix import optimize
from oryx.experimental.optimizers.optix import rmsprop
from oryx.experimental.optimizers.optix import sgd
| {
"content_hash": "f20c9ae9389a0357025f1866bdda44ac",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 53.142857142857146,
"alnum_prop": 0.8494623655913979,
"repo_name": "jax-ml/oryx",
"id": "d49d2ef4faabffa6d6b94687d8ac085a512b482c",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "oryx/experimental/optimizers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "61268"
},
{
"name": "Python",
"bytes": "593885"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
    """Configuration for `BertModel`."""

    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02):
        """Constructs BertConfig.

        Args:
          vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
          hidden_size: Size of the encoder layers and the pooler layer.
          num_hidden_layers: Number of hidden layers in the Transformer encoder.
          num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
          intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
          hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler.
          hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
          attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
          max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
          type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
          initializer_range: The stdev of the truncated_normal_initializer for
            initializing all weight matrices.
        """
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        # Use `cls` (not a hard-coded BertConfig) so subclasses round-trip,
        # and plain dict.items() instead of six.iteritems — it is portable
        # across py2/py3 and drops the needless `six` dependency here.
        config = cls(vocab_size=None)
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with tf.gfile.GFile(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class BertModel(object):
    """BERT model ("Bidirectional Encoder Representations from Transformers").

    Example usage:

    ```python
    # Already been converted into WordPiece token ids
    input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
    input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
    token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
    model = modeling.BertModel(config=config, is_training=True,
        input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
    label_embeddings = tf.get_variable(...)
    pooled_output = model.get_pooled_output()
    logits = tf.matmul(pooled_output, label_embeddings)
    ...
    ```
    """

    def __init__(self,
                 config,
                 is_training,
                 input_ids,
                 input_mask=None,
                 token_type_ids=None,
                 use_one_hot_embeddings=False,
                 scope=None):
        """Constructor for BertModel.

        Builds the whole graph (embeddings, transformer encoder, pooler) and
        stores the intermediate tensors as attributes for the get_* accessors.

        Args:
          config: `BertConfig` instance.
          is_training: bool. true for training model, false for eval model. Controls
            whether dropout will be applied.
          input_ids: int32 Tensor of shape [batch_size, seq_length].
          input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
          token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
          use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
            embeddings or tf.embedding_lookup() for the word embeddings.
          scope: (optional) variable scope. Defaults to "bert".

        Raises:
          ValueError: The config is invalid or one of the input tensor shapes
            is invalid.
        """
        # Copy so that zeroing the dropout probabilities for eval does not
        # mutate the caller's config object.
        config = copy.deepcopy(config)
        if not is_training:
            config.hidden_dropout_prob = 0.0
            config.attention_probs_dropout_prob = 0.0
        input_shape = get_shape_list(input_ids, expected_rank=2)
        batch_size = input_shape[0]
        seq_length = input_shape[1]
        # Default mask: attend to everything; default token types: all zero.
        if input_mask is None:
            input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
        if token_type_ids is None:
            token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
        with tf.variable_scope(scope, default_name="bert"):
            with tf.variable_scope("embeddings"):
                # Perform embedding lookup on the word ids.
                (self.word_embedding_output, self.embedding_table) = embedding_lookup(
                    input_ids=input_ids,
                    vocab_size=config.vocab_size,
                    embedding_size=config.hidden_size,
                    initializer_range=config.initializer_range,
                    word_embedding_name="word_embeddings",
                    use_one_hot_embeddings=use_one_hot_embeddings)
                # Add positional embeddings and token type embeddings, then layer
                # normalize and perform dropout.
                self.embedding_output = embedding_postprocessor(
                    input_tensor=self.word_embedding_output,
                    use_token_type=True,
                    token_type_ids=token_type_ids,
                    token_type_vocab_size=config.type_vocab_size,
                    token_type_embedding_name="token_type_embeddings",
                    use_position_embeddings=True,
                    position_embedding_name="position_embeddings",
                    initializer_range=config.initializer_range,
                    max_position_embeddings=config.max_position_embeddings,
                    dropout_prob=config.hidden_dropout_prob)
            with tf.variable_scope("encoder"):
                # This converts a 2D mask of shape [batch_size, seq_length] to a 3D
                # mask of shape [batch_size, seq_length, seq_length] which is used
                # for the attention scores.
                attention_mask = create_attention_mask_from_input_mask(
                    input_ids, input_mask)
                # Run the stacked transformer.
                # `sequence_output` shape = [batch_size, seq_length, hidden_size].
                self.all_encoder_layers, self.all_attention_layers = transformer_model(
                    input_tensor=self.embedding_output,
                    attention_mask=attention_mask,
                    hidden_size=config.hidden_size,
                    num_hidden_layers=config.num_hidden_layers,
                    num_attention_heads=config.num_attention_heads,
                    intermediate_size=config.intermediate_size,
                    intermediate_act_fn=get_activation(config.hidden_act),
                    hidden_dropout_prob=config.hidden_dropout_prob,
                    attention_probs_dropout_prob=config.attention_probs_dropout_prob,
                    initializer_range=config.initializer_range,
                    do_return_all_layers=True)
            self.sequence_output = self.all_encoder_layers[-1]
            # The "pooler" converts the encoded sequence tensor of shape
            # [batch_size, seq_length, hidden_size] to a tensor of shape
            # [batch_size, hidden_size]. This is necessary for segment-level
            # (or segment-pair-level) classification tasks where we need a fixed
            # dimensional representation of the segment.
            with tf.variable_scope("pooler"):
                # We "pool" the model by simply taking the hidden state corresponding
                # to the first token. We assume that this has been pre-trained
                first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
                self.pooled_output = tf.layers.dense(
                    first_token_tensor,
                    config.hidden_size,
                    activation=tf.tanh,
                    kernel_initializer=create_initializer(config.initializer_range))

    def get_pooled_output(self):
        # [batch_size, hidden_size] representation of the first token.
        return self.pooled_output

    def get_sequence_output(self):
        """Gets final hidden layer of encoder.

        Returns:
          float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
          to the final hidden of the transformer encoder.
        """
        return self.sequence_output

    def get_all_encoder_layers(self):
        # List of per-layer outputs from transformer_model().
        return self.all_encoder_layers

    def get_all_attention_layers(self):
        # List of per-layer attention tensors from transformer_model().
        return self.all_attention_layers

    def get_word_embedding_output(self):
        """Get output of the word(piece) embedding lookup.

        This is BEFORE positional embeddings and token type embeddings have been
        added.

        Returns:
          float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
          to the output of the word(piece) embedding layer.
        """
        return self.word_embedding_output

    def get_embedding_output(self):
        """Gets output of the embedding lookup (i.e., input to the transformer).

        Returns:
          float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
          to the output of the embedding layer, after summing the word
          embeddings with the positional embeddings and the token type embeddings,
          then performing layer normalization. This is the input to the transformer.
        """
        return self.embedding_output

    def get_embedding_table(self):
        # [vocab_size, hidden_size] word embedding table variable.
        return self.embedding_table
def gelu(x):
    """Gaussian Error Linear Unit (smooth alternative to the RELU).

    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      x: float Tensor to perform activation.

    Returns:
      `x` with the GELU activation applied.
    """
    # Tanh-based approximation of the Gaussian CDF.
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf = 0.5 * (1.0 + tf.tanh(inner))
    return x * cdf
def get_activation(activation_string):
    """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.

    Args:
      activation_string: String name of the activation function.

    Returns:
      A Python function corresponding to the activation function. If
      `activation_string` is None, empty, or "linear", this will return None.
      If `activation_string` is not a string, it will return `activation_string`.

    Raises:
      ValueError: The `activation_string` does not correspond to a known
        activation.
    """
    # Anything that's not a string is assumed to already be an activation
    # function, so it is passed straight through.
    if not isinstance(activation_string, six.string_types):
        return activation_string
    if not activation_string:
        return None

    act = activation_string.lower()
    if act == "linear":
        return None
    if act == "relu":
        return tf.nn.relu
    if act == "gelu":
        return gelu
    if act == "tanh":
        return tf.tanh
    raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Returns an (assignment_map, initialized_variable_names) pair: the map of
    checkpoint names to restore, and the set (as a dict) of variable names —
    with and without the ':0' suffix — that the checkpoint initializes.
    """
    initialized_variable_names = {}

    # Index trainable variables by bare name, i.e. with any ':<device>'
    # suffix (such as ':0') stripped.
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        match = re.match("^(.*):\\d+$", name)
        if match is not None:
            name = match.group(1)
        name_to_variable[name] = var

    # Keep only checkpoint entries that correspond to a current variable.
    assignment_map = collections.OrderedDict()
    for name, _ in tf.train.list_variables(init_checkpoint):
        if name not in name_to_variable:
            continue
        assignment_map[name] = name
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ":0"] = 1

    return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
    """Perform dropout.

    Args:
      input_tensor: float Tensor.
      dropout_prob: Python float. The probability of dropping out a value (NOT of
        *keeping* a dimension as in `tf.nn.dropout`).

    Returns:
      A version of `input_tensor` with dropout applied.
    """
    if dropout_prob is None or dropout_prob == 0.0:
        # Nothing to drop — hand the input back untouched.
        return input_tensor
    # tf.nn.dropout takes a *keep* probability, hence the complement.
    return tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
def layer_norm(input_tensor, name=None):
    """Run layer normalization on the last dimension of the tensor."""
    # begin_norm_axis / begin_params_axis of -1 normalize (and parameterize)
    # over the final (feature) dimension only.
    return tf.contrib.layers.layer_norm(
        inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
    """Runs layer normalization followed by dropout."""
    return dropout(layer_norm(input_tensor, name), dropout_prob)
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range."""
    # `initializer_range` is used as the stddev of the truncated normal.
    return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
                     vocab_size,
                     embedding_size=128,
                     initializer_range=0.02,
                     word_embedding_name="word_embeddings",
                     use_one_hot_embeddings=False):
    """Looks up words embeddings for id tensor.

    Args:
      input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
        ids.
      vocab_size: int. Size of the embedding vocabulary.
      embedding_size: int. Width of the word embeddings.
      initializer_range: float. Embedding initialization range.
      word_embedding_name: string. Name of the embedding table.
      use_one_hot_embeddings: bool. If True, use one-hot method for word
        embeddings. If False, use `tf.gather()`.

    Returns:
      float Tensor of shape [batch_size, seq_length, embedding_size].
    """
    # This function assumes that the input is of shape [batch_size, seq_length,
    # num_inputs].
    #
    # If the input is a 2D tensor of shape [batch_size, seq_length], we
    # reshape to [batch_size, seq_length, 1].
    if input_ids.shape.ndims == 2:
        input_ids = tf.expand_dims(input_ids, axis=[-1])
    embedding_table = tf.get_variable(
        name=word_embedding_name,
        shape=[vocab_size, embedding_size],
        initializer=create_initializer(initializer_range))
    # Flatten all id dimensions so a single gather/matmul covers every lookup.
    flat_input_ids = tf.reshape(input_ids, [-1])
    if use_one_hot_embeddings:
        # NOTE(review): one-hot + matmul path — presumably for hardware where
        # gather is slow (e.g. TPUs); confirm before relying on this.
        one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
        output = tf.matmul(one_hot_input_ids, embedding_table)
    else:
        output = tf.gather(embedding_table, flat_input_ids)
    input_shape = get_shape_list(input_ids)
    # Restore the leading dims; the trailing num_inputs dim is folded into the
    # embedding width: [..., num_inputs * embedding_size].
    output = tf.reshape(output,
                        input_shape[0:-1] + [input_shape[-1] * embedding_size])
    return (output, embedding_table)
def embedding_postprocessor(input_tensor,
                            use_token_type=False,
                            token_type_ids=None,
                            token_type_vocab_size=16,
                            token_type_embedding_name="token_type_embeddings",
                            use_position_embeddings=True,
                            position_embedding_name="position_embeddings",
                            initializer_range=0.02,
                            max_position_embeddings=512,
                            dropout_prob=0.1):
    """Performs various post-processing on a word embedding tensor.

    Args:
      input_tensor: float Tensor of shape [batch_size, seq_length,
        embedding_size].
      use_token_type: bool. Whether to add embeddings for `token_type_ids`.
      token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
        Must be specified if `use_token_type` is True.
      token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
      token_type_embedding_name: string. The name of the embedding table variable
        for token type ids.
      use_position_embeddings: bool. Whether to add position embeddings for the
        position of each token in the sequence.
      position_embedding_name: string. The name of the embedding table variable
        for positional embeddings.
      initializer_range: float. Range of the weight initialization.
      max_position_embeddings: int. Maximum sequence length that might ever be
        used with this model. This can be longer than the sequence length of
        input_tensor, but cannot be shorter.
      dropout_prob: float. Dropout probability applied to the final output tensor.

    Returns:
      float tensor with same shape as `input_tensor`.

    Raises:
      ValueError: One of the tensor shapes or input values is invalid.
    """
    input_shape = get_shape_list(input_tensor, expected_rank=3)
    batch_size = input_shape[0]
    seq_length = input_shape[1]
    width = input_shape[2]
    output = input_tensor
    if use_token_type:
        if token_type_ids is None:
            raise ValueError("`token_type_ids` must be specified if"
                             "`use_token_type` is True.")
        token_type_table = tf.get_variable(
            name=token_type_embedding_name,
            shape=[token_type_vocab_size, width],
            initializer=create_initializer(initializer_range))
        # This vocab will be small so we always do one-hot here, since it is always
        # faster for a small vocabulary.
        flat_token_type_ids = tf.reshape(token_type_ids, [-1])
        one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
        token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
        token_type_embeddings = tf.reshape(token_type_embeddings,
                                           [batch_size, seq_length, width])
        output += token_type_embeddings
    if use_position_embeddings:
        # Graph-level guard: fail at run time if the sequence is longer than
        # the position table.
        assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
        with tf.control_dependencies([assert_op]):
            full_position_embeddings = tf.get_variable(
                name=position_embedding_name,
                shape=[max_position_embeddings, width],
                initializer=create_initializer(initializer_range))
            # Since the position embedding table is a learned variable, we create it
            # using a (long) sequence length `max_position_embeddings`. The actual
            # sequence length might be shorter than this, for faster training of
            # tasks that do not have long sequences.
            #
            # So `full_position_embeddings` is effectively an embedding table
            # for position [0, 1, 2, ..., max_position_embeddings-1], and the current
            # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
            # perform a slice.
            position_embeddings = tf.slice(full_position_embeddings, [0, 0],
                                           [seq_length, -1])
            num_dims = len(output.shape.as_list())
            # Only the last two dimensions are relevant (`seq_length` and `width`), so
            # we broadcast among the first dimensions, which is typically just
            # the batch size.
            position_broadcast_shape = []
            for _ in range(num_dims - 2):
                position_broadcast_shape.append(1)
            position_broadcast_shape.extend([seq_length, width])
            position_embeddings = tf.reshape(position_embeddings,
                                             position_broadcast_shape)
            output += position_embeddings
    output = layer_norm_and_dropout(output, dropout_prob)
    return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
      to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
      float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]
    to_shape = get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]
    # `to_mask` = [batch_size, 1, to_seq_length] after the reshape/cast.
    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to* padding)
    # tokens so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask
    return mask
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-with vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
                      attention_mask=None,
                      hidden_size=768,
                      num_hidden_layers=12,
                      num_attention_heads=12,
                      intermediate_size=3072,
                      intermediate_act_fn=gelu,
                      hidden_dropout_prob=0.1,
                      attention_probs_dropout_prob=0.1,
                      initializer_range=0.02,
                      do_return_all_layers=False):
  """Multi-headed, multi-layer Transformer from "Attention is All You Need".

  This is almost an exact implementation of the original Transformer encoder.

  See the original paper:
  https://arxiv.org/abs/1706.03762

  Also see:
  https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py

  Args:
    input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
    attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
      seq_length], with 1 for positions that can be attended to and 0 in
      positions that should not be.
    hidden_size: int. Hidden size of the Transformer.
    num_hidden_layers: int. Number of layers (blocks) in the Transformer.
    num_attention_heads: int. Number of attention heads in the Transformer.
    intermediate_size: int. The size of the "intermediate" (a.k.a., feed
      forward) layer.
    intermediate_act_fn: function. The non-linear activation function to apply
      to the output of the intermediate/feed-forward layer.
    hidden_dropout_prob: float. Dropout probability for the hidden layers.
    attention_probs_dropout_prob: float. Dropout probability of the attention
      probabilities.
    initializer_range: float. Range of the initializer (stddev of truncated
      normal).
    do_return_all_layers: Whether to also return all layers or just the final
      layer.

  Returns:
    float Tensor of shape [batch_size, seq_length, hidden_size], the final
    hidden layer of the Transformer. If `do_return_all_layers` is True,
    instead returns a pair of lists with one entry per layer: the per-layer
    hidden outputs and the per-layer attention sub-layer outputs, each
    reshaped back to the input's 3D shape.

  Raises:
    ValueError: A Tensor shape or parameter is invalid.
  """
  if hidden_size % num_attention_heads != 0:
    raise ValueError(
        "The hidden size (%d) is not a multiple of the number of attention "
        "heads (%d)" % (hidden_size, num_attention_heads))
  # Each attention head works on an equal slice of the hidden dimension.
  attention_head_size = int(hidden_size / num_attention_heads)
  input_shape = get_shape_list(input_tensor, expected_rank=3)
  batch_size = input_shape[0]
  seq_length = input_shape[1]
  input_width = input_shape[2]
  # The Transformer performs sum residuals on all layers so the input needs
  # to be the same as the hidden size.
  if input_width != hidden_size:
    raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
                     (input_width, hidden_size))
  # We keep the representation as a 2D tensor to avoid re-shaping it back and
  # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
  # the GPU/CPU but may not be free on the TPU, so we want to minimize them to
  # help the optimizer.
  prev_output = reshape_to_matrix(input_tensor)
  all_layer_outputs = []
  # Also records every layer's attention sub-layer output (after the
  # residual add and LayerNorm below).
  all_attention_outputs = []
  for layer_idx in range(num_hidden_layers):
    # Per-layer variable scope: weights live under "layer_<idx>/...".
    with tf.variable_scope("layer_%d" % layer_idx):
      layer_input = prev_output
      with tf.variable_scope("attention"):
        attention_heads = []
        with tf.variable_scope("self"):
          attention_head = attention_layer(
              from_tensor=layer_input,
              to_tensor=layer_input,
              attention_mask=attention_mask,
              num_attention_heads=num_attention_heads,
              size_per_head=attention_head_size,
              attention_probs_dropout_prob=attention_probs_dropout_prob,
              initializer_range=initializer_range,
              do_return_2d_tensor=True,
              batch_size=batch_size,
              from_seq_length=seq_length,
              to_seq_length=seq_length)
          attention_heads.append(attention_head)
        attention_output = None
        if len(attention_heads) == 1:
          attention_output = attention_heads[0]
        else:
          # In the case where we have other sequences, we just concatenate
          # them to the self-attention head before the projection.
          attention_output = tf.concat(attention_heads, axis=-1)
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        with tf.variable_scope("output"):
          attention_output = tf.layers.dense(
              attention_output,
              hidden_size,
              kernel_initializer=create_initializer(initializer_range))
          attention_output = dropout(attention_output, hidden_dropout_prob)
          attention_output = layer_norm(attention_output + layer_input)
      all_attention_outputs.append(attention_output)
      # The activation is only applied to the "intermediate" hidden layer.
      with tf.variable_scope("intermediate"):
        intermediate_output = tf.layers.dense(
            attention_output,
            intermediate_size,
            activation=intermediate_act_fn,
            kernel_initializer=create_initializer(initializer_range))
      # Down-project back to `hidden_size` then add the residual.
      with tf.variable_scope("output"):
        layer_output = tf.layers.dense(
            intermediate_output,
            hidden_size,
            kernel_initializer=create_initializer(initializer_range))
        layer_output = dropout(layer_output, hidden_dropout_prob)
        layer_output = layer_norm(layer_output + attention_output)
        prev_output = layer_output
        all_layer_outputs.append(layer_output)
  if do_return_all_layers:
    final_outputs = []
    final_attention_outputs = []
    for layer_output, attn_output in zip(all_layer_outputs,
                                         all_attention_outputs):
      # Expand each 2D [B*S, H] activation back to 3D [B, S, H].
      final_output = reshape_from_matrix(layer_output, input_shape)
      final_outputs.append(final_output)
      final_attn_output = reshape_from_matrix(attn_output, input_shape)
      final_attention_outputs.append(final_attn_output)
    return final_outputs, final_attention_outputs
  else:
    final_output = reshape_from_matrix(prev_output, input_shape)
    return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns the shape of `tensor`, using static dimensions where possible.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int or list of ints. If given, the tensor's
      rank is validated against it and a ValueError is raised on mismatch.
    name: Optional name of the tensor for the error message.

  Returns:
    A list with one entry per dimension: a Python int for every statically
    known dimension, and a scalar tf.Tensor for every dynamic one.
  """
  if name is None:
    name = tensor.name
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)
  static_shape = tensor.shape.as_list()
  # Indexes of the dimensions that are unknown at graph-construction time.
  dynamic_indexes = [
      axis for axis, dim in enumerate(static_shape) if dim is None
  ]
  if not dynamic_indexes:
    return static_shape
  # Fill in the unknown dimensions from the runtime shape op.
  runtime_shape = tf.shape(tensor)
  for axis in dynamic_indexes:
    static_shape[axis] = runtime_shape[axis]
  return static_shape
def reshape_to_matrix(input_tensor):
  """Collapses a tensor of rank >= 2 into a rank-2 matrix.

  All leading dimensions are folded into the first axis; the last
  dimension is preserved. Rank-2 inputs are returned unchanged.
  """
  rank = input_tensor.shape.ndims
  if rank < 2:
    raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                     (input_tensor.shape))
  if rank == 2:
    # Already a matrix; nothing to do.
    return input_tensor
  last_dim = input_tensor.shape[-1]
  return tf.reshape(input_tensor, [-1, last_dim])
def reshape_from_matrix(output_tensor, orig_shape_list):
  """Restores a rank-2 matrix to the leading dimensions of `orig_shape_list`.

  The leading dimensions come from `orig_shape_list`; the final (width)
  dimension is taken from `output_tensor` itself. Rank-2 originals are
  returned unchanged.
  """
  if len(orig_shape_list) == 2:
    return output_tensor
  leading_dims = orig_shape_list[:-1]
  width = get_shape_list(output_tensor)[-1]
  return tf.reshape(output_tensor, leading_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
  """Raises ValueError unless `tensor` has one of the expected ranks.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer, or an iterable of integers, listing the
      acceptable ranks.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the tensor's actual rank is not among the expected ranks.
  """
  if name is None:
    name = tensor.name
  # Normalize the accepted ranks into a set for membership testing.
  if isinstance(expected_rank, six.integer_types):
    accepted_ranks = {expected_rank}
  else:
    accepted_ranks = set(expected_rank)
  actual_rank = tensor.shape.ndims
  if actual_rank not in accepted_ranks:
    scope_name = tf.get_variable_scope().name
    raise ValueError(
        "For the tensor `%s` in scope `%s`, the actual rank "
        "`%d` (shape = %s) is not equal to the expected rank `%s`" %
        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| {
"content_hash": "3e4c7047a954d511534f9fd0123d4987",
"timestamp": "",
"source": "github",
"line_count": 993,
"max_line_length": 93,
"avg_line_length": 38.37462235649547,
"alnum_prop": 0.6577704298535664,
"repo_name": "google/embedding-tests",
"id": "d4a436aa51e3911c1f68f0350f0aff2b2b01655f",
"size": "38682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/bert/modeling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "502131"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class RandomNormalTest(tf.test.TestCase):
  """Tests for tf.random_normal: distinctness, CPU/GPU parity, seeding."""
  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Returns a thunk that draws a [10, num] array of normal samples.

    Each call of the thunk builds a fresh graph and session, so two
    calls only agree when an explicit `seed` is supplied.
    """
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.random_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of positions where the two trials coincide exactly.
        count = (x == y).sum()
        if count >= 10:
          # Dump the offending data before failing, to aid debugging.
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)
  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
  def testSeed(self):
    # Two samplers constructed with the same seed must agree exactly.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())
  def testNoCSE(self):
    # Two identical random ops in one graph must not be merged by
    # common-subexpression elimination: their outputs should differ.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
        rnd2 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class TruncatedNormalTest(tf.test.TestCase):
  """Tests for tf.truncated_normal: distinctness, parity, seeding, stddev."""
  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Returns a thunk that draws a [10, num] array of truncated-normal samples.

    Each call of the thunk builds a fresh graph and session, so two
    calls only agree when an explicit `seed` is supplied.
    """
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.truncated_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    # NOTE: RandomParameters on GPU is not supported.
    for use_gpu in [False]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of positions where the two trials coincide exactly.
        count = (x == y).sum()
        if count >= 10:
          # Dump the offending data before failing, to aid debugging.
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)
  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        # We need a particularly large number of samples to test multiple
        # rounds on GPU
        sampler = self._Sampler(1000000, 0.0, 1.0, dt, use_gpu=use_gpu,
                                seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
  def testSeed(self):
    # Two samplers constructed with the same seed must agree exactly.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())
  # The effective standard deviation of truncated normal is 85% of the
  # requested one.
  def testStdDev(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        stddev = 3.0
        sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=use_gpu)
        x = sampler()
        print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
        self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)
  def testNoCSE(self):
    # Two identical random ops in one graph must not be merged by
    # common-subexpression elimination: their outputs should differ.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
        rnd2 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class RandomUniformTest(tf.test.TestCase):
  """Tests for tf.random_uniform: range, distinctness, parity, seeding."""
  def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
    """Returns a thunk that draws a [10, num] array of uniform samples.

    Each call of the thunk builds a fresh graph and session, so two
    calls only agree when an explicit `seed` is supplied.
    """
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.random_uniform(
            [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func
  def testRange(self):
    # All samples must stay inside the requested [minval, maxval] interval.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, -2., 8., dt, use_gpu=use_gpu)
        x = sampler()
        self.assertTrue(-2 <= np.min(x))
        self.assertTrue(np.max(x) <= 8)
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of positions where the two trials coincide exactly.
        count = (x == y).sum()
        if count >= 10:
          # Dump the offending data before failing, to aid debugging.
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)
  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
  def testSeed(self):
    # Two samplers constructed with the same seed must agree exactly.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())
  def testNoCSE(self):
    # Two identical random ops in one graph must not be merged by
    # common-subexpression elimination: their outputs should differ.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.random_uniform(shape, 0.0, 1.0,
                                 dtype=tf.float32)
        rnd2 = tf.random_uniform(shape, 0.0, 1.0,
                                 dtype=tf.float32)
        diff = (rnd2 - rnd1).eval()
        self.assertTrue(np.linalg.norm(diff) > 0.1)
class RandomShapeTest(tf.test.TestCase):
  """Checks static shape inference for the random-op constructors."""
  def testRandomParameters(self):
    # A literal shape list is fully inferred.
    fully_known = tf.truncated_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], fully_known.get_shape())
    # A length-3 int32 placeholder fixes only the rank (three unknown dims).
    rank_only = tf.truncated_normal(tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rank_only.get_shape().as_list())
    # A placeholder with no shape info leaves the result shape unknown.
    unknown = tf.truncated_normal(tf.placeholder(tf.int32))
    self.assertIs(None, unknown.get_shape().ndims)
  def testRandomNormal(self):
    # Same three cases as above, for tf.random_normal.
    fully_known = tf.random_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], fully_known.get_shape())
    rank_only = tf.random_normal(tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rank_only.get_shape().as_list())
    unknown = tf.random_normal(tf.placeholder(tf.int32))
    self.assertIs(None, unknown.get_shape().ndims)
  def testRandomUniform(self):
    # Same three cases as above, for tf.random_uniform.
    fully_known = tf.random_uniform([1, 2, 3])
    self.assertEqual([1, 2, 3], fully_known.get_shape())
    rank_only = tf.random_uniform(tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rank_only.get_shape().as_list())
    unknown = tf.random_uniform(tf.placeholder(tf.int32))
    self.assertIs(None, unknown.get_shape().ndims)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| {
"content_hash": "938188733a94ffe99168e13b08522ac2",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 36.4390243902439,
"alnum_prop": 0.6018518518518519,
"repo_name": "pavlovml/tensorflow",
"id": "2ba4dac3a1a4defe1be93b83e4863299a485fc89",
"size": "8964",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/random_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127104"
},
{
"name": "C++",
"bytes": "4910453"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "637366"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45213"
},
{
"name": "Python",
"bytes": "2480267"
},
{
"name": "Shell",
"bytes": "4262"
},
{
"name": "TypeScript",
"bytes": "237684"
}
],
"symlink_target": ""
} |
"""Site sources."""
# Copyright (c) 2012-2022 Wibowo Arindrarto <contact@arindrarto.dev>
# SPDX-License-Identifier: BSD-3-Clause
from contextlib import suppress
from dataclasses import dataclass
from datetime import datetime as dt
from functools import cached_property
from pathlib import Path
from typing import cast, Optional
from urllib.parse import urljoin
import pendulum
import yaml
from jinja2 import Template
from markdown2 import Markdown
from pendulum.datetime import DateTime
from slugify import slugify
from yaml import SafeLoader
from . import constants
from .error import VoltResourceError
from .config import Config
from .targets import TemplateTarget
__all__ = ["FileSource", "Markdown2Source", "Source"]
# Module-level markdown2 converter, configured once at import time and
# shared by all Markdown2Source instances (see Markdown2Source.html).
# The "fenced-code-blocks" options below are presumably passed through to
# the syntax highlighter (note cssclass "hl", noclasses False) — confirm
# against the markdown2 extras documentation.
_MD = Markdown(
    extras={
        "fenced-code-blocks": {
            "nowrap": False,
            "full": False,
            "title": "",
            "noclasses": False,
            "classprefix": "",
            "cssclass": "hl",
            "csstyles": "",
            "prestyles": "",
            "cssfile": "",
            "noclobber_cssfile": False,
            "linenos": False,
            "hl_lines": [],
            "linenostart": 1,
            "linenostep": 1,
            "linenospecial": 0,
            "nobackground": False,
            "lineseparator": "\n",
            "lineanchors": "",
            "anchorlinenos": False,
        },
        "markdown-in-html": {},
        "header-ids": {},
    }
)
@dataclass(kw_only=True)
class Source:
    """A source for the site content.

    Base class holding the fields common to all sources; all fields are
    keyword-only.
    """
    # Metadata of the content.
    meta: dict
    # Site configuration.
    config: Config
    # Whether the content is draft or not.
    is_draft: bool
@dataclass(kw_only=True)
class FileSource(Source):
    """A source on the filesystem for the site content."""
    # FileSystem path to the source content. Concrete subclasses
    # (e.g. Markdown2Source) parse the file at this path.
    src: Path
@dataclass(kw_only=True, eq=False)
class Markdown2Source(FileSource):
    """A markdown source parsed using the markdown2 library."""
    # Markdown text of the body, without any metadata.
    body: str
    @classmethod
    def from_path(
        cls,
        src: Path,
        config: Config,
        meta: Optional[dict] = None,
        is_draft: bool = False,
        fm_sep: str = constants.FRONT_MATTER_SEP,
    ) -> "Markdown2Source":
        """Create an instance from a file.

        :param src: Path to the source file.
        :param config: Site configuration.
        :param meta: Optional metadata to inject; takes precedence over
            values parsed from the front matter.
        :param is_draft: Whether the content is a draft.
        :param fm_sep: String for separating the markdown front matter.
        """
        raw_text = src.read_text()
        # Split on at most two separators; the last piece is always the
        # markdown body, any non-empty piece before it is the front matter.
        *top, raw_body = raw_text.split(fm_sep, 2)
        raw_fm = [item for item in top if item]
        fm = {} if not raw_fm else yaml.load(raw_fm[0].strip(), Loader=SafeLoader)
        return cls(
            body=raw_body,
            src=src,
            # TODO: Validate minimal front matter metadata.
            # Precedence, low to high: defaults < front matter < `meta` arg.
            meta={
                "labels": {},
                "title": None,
                "is_draft": is_draft,
                **fm,
                **(meta or {}),
            },
            config=config,
            is_draft=is_draft,
        )
    @cached_property
    def url(self) -> str:
        """Site-relative URL path, from the "url" metadata or the title."""
        config = self.config
        url_key = "url"
        parts = (
            [part for part in self.meta[url_key].split("/") if part]
            if self.meta.get(url_key) is not None
            else [f"{slugify(self.title, replacements=config.slug_replacements)}.html"]
        )
        ps = [*(self.src.parent.parts[config.num_common_parts :]), *parts]
        if self.is_draft:
            with suppress(IndexError):
                # NOTE: This assumes that the `drafts` folder is located at the same
                # level as non-draft files.
                del ps[-2]
        return f"/{'/'.join(ps)}"
    @property
    def url_abs(self) -> str:
        """Absolute URL: the configured site URL joined with :meth:`url`."""
        return urljoin(self.config.url, self.url)
    @property
    def title(self) -> str:
        """Content title from the parsed metadata."""
        return cast(str, self.meta["title"])
    @cached_property
    def pub_time(self) -> Optional[DateTime]:
        """Publication time from the "pub_time" metadata, or None if unset.

        Accepts either a pendulum-parseable string or a datetime object;
        any other value raises VoltResourceError.
        """
        value = self.meta.get("pub_time", None)
        exc = VoltResourceError(
            f"value {value!r} in {str(self.src)!r} is not a valid datetime"
        )
        if value is None:
            return value
        if isinstance(value, str):
            rv = pendulum.parse(value)
            # pendulum.parse may return non-DateTime results (e.g. durations);
            # only a full DateTime is acceptable here.
            if isinstance(rv, DateTime):
                return rv
            raise exc
        if isinstance(value, dt):
            return pendulum.instance(value)
        raise exc
    @cached_property
    def html(self) -> str:
        """Markdown body rendered to HTML (cached after first access)."""
        return cast(str, _MD.convert(self.body))
    def to_template_target(self, template: Template) -> TemplateTarget:
        """Create a :class:`TemplateTarget` instance."""
        return TemplateTarget(
            url=self.url,
            template=template,
            render_kwargs={
                "meta": self.meta,
                "content": self.html,
            },
            src=self.src.relative_to(self.config.project_dir),
        )
| {
"content_hash": "81cbb1af98d433a2b2aace85a3be1fa8",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 87,
"avg_line_length": 27.338709677419356,
"alnum_prop": 0.5596853490658801,
"repo_name": "bow/volt",
"id": "1bd8334ecde1e63bcbf9b1aa8280c2e57d5cbb67",
"size": "5085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "volt/sources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1267"
},
{
"name": "Jinja",
"bytes": "883"
},
{
"name": "Makefile",
"bytes": "5475"
},
{
"name": "Python",
"bytes": "124895"
}
],
"symlink_target": ""
} |
import logging
from typing import Optional
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from zerver.actions.message_send import (
check_send_private_message,
send_rate_limited_pm_notification_to_bot_owner,
)
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.send_email import FromAddress
from zerver.lib.validator import WildValue, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import Realm, UserProfile
# Private message sent to the bot owner when a TeamCity payload arrives
# without the expected "build" key (see api_teamcity_webhook), which
# indicates the webhook is not using the Legacy Webhook (JSON) format.
MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE = """
Hi there! Your bot {bot_name} just received a TeamCity payload in a
format that Zulip doesn't recognize. This usually indicates a
configuration issue in your TeamCity webhook settings. Please make sure
that you set the **Payload Format** option to **Legacy Webhook (JSON)**
in your TeamCity webhook configuration. Contact {support_email} if you
need further help!
"""
def guess_zulip_user_from_teamcity(teamcity_username: str, realm: Realm) -> Optional[UserProfile]:
    """Best-effort lookup of the Zulip account for a TeamCity username.

    Matches active users in the realm whose full name equals the TeamCity
    username (case-insensitively) or whose email address starts with it.
    Returns the lowest-id (oldest) match, or None if there is none.
    """
    # QuerySet.first() on the ordered queryset returns the first row or
    # None, replacing the previous order_by("id")[0] + except IndexError.
    return (
        UserProfile.objects.filter(
            Q(full_name__iexact=teamcity_username) | Q(email__istartswith=teamcity_username),
            is_active=True,
            realm=realm,
        )
        .order_by("id")
        .first()
    )
def get_teamcity_property_value(property_list: WildValue, name: str) -> Optional[str]:
    """Return the value of the named TeamCity build property, or None."""
    matching_values = (
        prop["value"].tame(check_string)
        for prop in property_list
        if prop["name"].tame(check_string) == name
    )
    # First property whose name matches wins; None if nothing matches.
    return next(matching_values, None)
@webhook_view("TeamCity")
@has_request_variables
def api_teamcity_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
) -> HttpResponse:
    """Handle a TeamCity "Legacy Webhook (JSON)" build notification.

    Regular builds produce a stream message; personal builds produce a
    private message to the user who triggered the build (when that user
    can be matched to a Zulip account). Payloads lacking a "build" key
    are treated as misconfigured and reported to the bot owner.
    """
    if "build" not in payload:
        # Ignore third-party specific (e.g. Slack) payload formats
        # and notify the bot owner
        error_message = MISCONFIGURED_PAYLOAD_TYPE_ERROR_MESSAGE.format(
            bot_name=user_profile.full_name,
            support_email=FromAddress.SUPPORT,
        ).strip()
        send_rate_limited_pm_notification_to_bot_owner(
            user_profile, user_profile.realm, error_message
        )
        return json_success(request)
    message = payload.get("build")
    # Extract the build fields used to compose the notification.
    build_name = message["buildFullName"].tame(check_string)
    build_url = message["buildStatusUrl"].tame(check_string)
    changes_url = build_url + "&tab=buildChangesDiv"
    build_number = message["buildNumber"].tame(check_string)
    build_result = message["buildResult"].tame(check_string)
    build_result_delta = message["buildResultDelta"].tame(check_string)
    build_status = message["buildStatus"].tame(check_string)
    # Translate result + delta into a status phrase: "failure" distinguishes
    # newly broken ("broken") from still broken; "running" has no delta.
    # NOTE(review): an unrecognized build_result leaves `status` unassigned,
    # which would raise NameError in template.format below — confirm TeamCity
    # only ever sends success/failure/running here.
    if build_result == "success":
        if build_result_delta == "fixed":
            status = "has been fixed! :thumbs_up:"
        else:
            status = "was successful! :thumbs_up:"
    elif build_result == "failure":
        if build_result_delta == "broken":
            status = f"is broken with status {build_status}! :thumbs_down:"
        else:
            status = f"is still broken with status {build_status}! :thumbs_down:"
    elif build_result == "running":
        status = "has started."
    template = """
{build_name} build {build_id} {status} See [changes]\
({changes_url}) and [build log]({log_url}).
""".strip()
    body = template.format(
        build_name=build_name,
        build_id=build_number,
        status=status,
        changes_url=changes_url,
        log_url=build_url,
    )
    if "branchDisplayName" in message:
        topic = "{} ({})".format(build_name, message["branchDisplayName"].tame(check_string))
    else:
        topic = build_name
    # Check if this is a personal build, and if so try to private message the user who triggered it.
    if (
        get_teamcity_property_value(message["teamcityProperties"], "env.BUILD_IS_PERSONAL")
        == "true"
    ):
        # The triggeredBy field gives us the teamcity user full name, and the
        # "teamcity.build.triggeredBy.username" property gives us the teamcity username.
        # Let's try finding the user email from both.
        teamcity_fullname = message["triggeredBy"].tame(check_string).split(";")[0]
        teamcity_user = guess_zulip_user_from_teamcity(teamcity_fullname, user_profile.realm)
        if teamcity_user is None:
            # Fall back to the TeamCity username property.
            teamcity_shortname = get_teamcity_property_value(
                message["teamcityProperties"], "teamcity.build.triggeredBy.username"
            )
            if teamcity_shortname is not None:
                teamcity_user = guess_zulip_user_from_teamcity(
                    teamcity_shortname, user_profile.realm
                )
            if teamcity_user is None:
                # We can't figure out who started this build - there's nothing we can do here.
                logging.info(
                    "TeamCity webhook couldn't find a matching Zulip user for "
                    "TeamCity user '%s' or '%s'",
                    teamcity_fullname,
                    teamcity_shortname,
                )
                return json_success(request)
        body = f"Your personal build for {body}"
        client = RequestNotes.get_notes(request).client
        assert client is not None
        check_send_private_message(user_profile, client, teamcity_user, body)
        return json_success(request)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success(request)
| {
"content_hash": "7d0f9a153076389878f1e24bfc428a6a",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 100,
"avg_line_length": 38.77852348993289,
"alnum_prop": 0.6588785046728972,
"repo_name": "rht/zulip",
"id": "29eee9ff449aee4a3d296c4e609c024c69411970",
"size": "5814",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "zerver/webhooks/teamcity/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "489438"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "743287"
},
{
"name": "Handlebars",
"bytes": "374049"
},
{
"name": "JavaScript",
"bytes": "4000260"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10160680"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284836"
}
],
"symlink_target": ""
} |
import datetime
from django import forms
from django.db import models
class Author(models.Model):
    """An author with a display name, ordered alphabetically by default."""
    name = models.CharField(max_length=100)
    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ('name',)
    def __unicode__(self):
        # Python 2-era display method (this is a legacy Django module).
        return self.name
class BetterAuthor(Author):
    """Multi-table-inheritance child of Author adding one extra field."""
    write_speed = models.IntegerField()
class Book(models.Model):
    """A book belonging to an Author; (author, title) pairs must be unique."""
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)
    class Meta:
        unique_together = (
            ('author', 'title'),
        )
        # Order by insertion (pk) so doctest output below is stable.
        ordering = ['id']
    def __unicode__(self):
        return self.title
class BookWithCustomPK(models.Model):
    """Book variant whose primary key is an explicit DecimalField (not an AutoField)."""
    my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
    author = models.ForeignKey(Author)
    title = models.CharField(max_length=100)
    def __unicode__(self):
        return u'%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
    """Named editor, referenced as an optional FK by BookWithOptionalAltEditor."""
    name = models.CharField(max_length=100)
class BookWithOptionalAltEditor(models.Model):
    """Book with a nullable FK inside its unique_together constraint.

    Exercises unique_together validation when one member of the tuple
    may be NULL (see the inline formset doctests below).
    """
    author = models.ForeignKey(Author)
    # Optional secondary author
    alt_editor = models.ForeignKey(Editor, blank=True, null=True)
    title = models.CharField(max_length=100)
    class Meta:
        unique_together = (
            ('author', 'title', 'alt_editor'),
        )
    def __unicode__(self):
        return self.title
class AlternateBook(Book):
    """Multi-table-inheritance child of Book (auto-created book_ptr pk)."""
    notes = models.CharField(max_length=100)
    def __unicode__(self):
        return u'%s - %s' % (self.title, self.notes)
class AuthorMeeting(models.Model):
    """Meeting with a M2M to Author; used to test save(commit=False)/save_m2m.

    ``created`` is editable=False, so forms omit it and the doctests set
    it manually before saving.
    """
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author)
    created = models.DateField(editable=False)
    def __unicode__(self):
        return self.name
class CustomPrimaryKey(models.Model):
    """Model with a CharField primary key, to check the pk is rendered in forms."""
    my_pk = models.CharField(max_length=10, primary_key=True)
    some_field = models.CharField(max_length=100)
# models for inheritance tests.
class Place(models.Model):
    """Base model for the inheritance tests (Restaurant et al. derive from it)."""
    name = models.CharField(max_length=50)
    city = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
class Owner(models.Model):
    """Owner of a Place, with an explicitly declared AutoField primary key."""
    auto_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100)
    place = models.ForeignKey(Place)
    def __unicode__(self):
        return "%s at %s" % (self.name, self.place)
class Location(models.Model):
    """Coordinates for a Place; unique=True makes the FK effectively one-to-one."""
    place = models.ForeignKey(Place, unique=True)
    # this is purely for testing the data doesn't matter here :)
    lat = models.CharField(max_length=100)
    lon = models.CharField(max_length=100)
class OwnerProfile(models.Model):
    """Profile keyed one-to-one on Owner; the OneToOneField doubles as the pk."""
    owner = models.OneToOneField(Owner, primary_key=True)
    age = models.PositiveIntegerField()
    def __unicode__(self):
        return "%s is %d" % (self.owner.name, self.age)
class Restaurant(Place):
    """Multi-table-inheritance child of Place."""
    serves_pizza = models.BooleanField()
    def __unicode__(self):
        return self.name
class Product(models.Model):
    """Model with a single unique SlugField, for unique-field validation tests."""
    slug = models.SlugField(unique=True)
    def __unicode__(self):
        return self.slug
class Price(models.Model):
    """(price, quantity) pairs that must be unique together."""
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()
    def __unicode__(self):
        return u"%s for %s" % (self.quantity, self.price)
    # NOTE: Meta declared after __unicode__ -- unusual ordering, but valid.
    class Meta:
        unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
    """Second level of multi-table inheritance (Place -> Restaurant -> this)."""
    serves_tacos = models.BooleanField()
class ClassyMexicanRestaurant(MexicanRestaurant):
    """Inheritance child whose parent link is declared explicitly and used as pk."""
    restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
    tacos_are_yummy = models.BooleanField()
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
class Repository(models.Model):
    """Parent model for unique_together-with-FK tests via inlineformset_factory."""
    name = models.CharField(max_length=25)
    def __unicode__(self):
        return self.name
class Revision(models.Model):
    """Revision within a Repository; (repository, revision) must be unique."""
    repository = models.ForeignKey(Repository)
    revision = models.CharField(max_length=40)
    class Meta:
        unique_together = (("repository", "revision"),)
    def __unicode__(self):
        return u"%s (%s)" % (self.revision, unicode(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
    """Person referenced by Membership in the callable-default tests (bug #7975)."""
    name = models.CharField(max_length=128)
class Membership(models.Model):
    """Has a callable default (datetime.datetime.now), so the form's initial
    value cannot be relied upon -- see bug #7975 noted above."""
    person = models.ForeignKey(Person)
    date_joined = models.DateTimeField(default=datetime.datetime.now)
    karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
    """Parent for the nullable-FK tests (Player.team may be NULL)."""
    name = models.CharField(max_length=100)
class Player(models.Model):
    """Player with a null=True FK to Team, for nullable-parent formset tests."""
    team = models.ForeignKey(Team, null=True)
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
class Poet(models.Model):
    """Parent model for the custom-ModelForm-save formset tests."""
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
class Poem(models.Model):
    """Child of Poet, edited through inline formsets with a custom form save."""
    poet = models.ForeignKey(Poet)
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name
class Post(models.Model):
    """Model exercising unique_for_date / unique_for_month / unique_for_year
    validation against the ``posted`` DateField.
    """
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()
    def __unicode__(self):
        # Bug fix: this returned self.name, but Post has no ``name`` field,
        # so any unicode() of a Post raised AttributeError. Use the title.
        return self.title
__test__ = {'API_TESTS': """
>>> from datetime import date
>>> from django.forms.models import modelformset_factory
>>> qs = Author.objects.all()
>>> AuthorFormSet = modelformset_factory(Author, extra=3)
>>> formset = AuthorFormSet(queryset=qs)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>
<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>
<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>
>>> data = {
... 'form-TOTAL_FORMS': '3', # the number of forms rendered
... 'form-INITIAL_FORMS': '0', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-name': 'Charles Baudelaire',
... 'form-1-name': 'Arthur Rimbaud',
... 'form-2-name': '',
... }
>>> formset = AuthorFormSet(data=data, queryset=qs)
>>> formset.is_valid()
True
>>> formset.save()
[<Author: Charles Baudelaire>, <Author: Arthur Rimbaud>]
>>> for author in Author.objects.order_by('name'):
... print author.name
Arthur Rimbaud
Charles Baudelaire
Gah! We forgot Paul Verlaine. Let's create a formset to edit the existing
authors with an extra form to add him. We *could* pass in a queryset to
restrict the Author objects we edit, but in this case we'll use it to display
them in alphabetical order by name.
>>> qs = Author.objects.order_by('name')
>>> AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=False)
>>> formset = AuthorFormSet(queryset=qs)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="2" id="id_form-0-id" /></p>
<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="1" id="id_form-1-id" /></p>
<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>
>>> data = {
... 'form-TOTAL_FORMS': '3', # the number of forms rendered
... 'form-INITIAL_FORMS': '2', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-id': '2',
... 'form-0-name': 'Arthur Rimbaud',
... 'form-1-id': '1',
... 'form-1-name': 'Charles Baudelaire',
... 'form-2-name': 'Paul Verlaine',
... }
>>> formset = AuthorFormSet(data=data, queryset=qs)
>>> formset.is_valid()
True
# Only changed or new objects are returned from formset.save()
>>> formset.save()
[<Author: Paul Verlaine>]
>>> for author in Author.objects.order_by('name'):
... print author.name
Arthur Rimbaud
Charles Baudelaire
Paul Verlaine
This probably shouldn't happen, but it will. If an add form was marked for
deletion, make sure we don't save that form.
>>> qs = Author.objects.order_by('name')
>>> AuthorFormSet = modelformset_factory(Author, extra=1, can_delete=True)
>>> formset = AuthorFormSet(queryset=qs)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>
<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="2" id="id_form-0-id" /></p>
<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>
<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="1" id="id_form-1-id" /></p>
<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>
<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="3" id="id_form-2-id" /></p>
<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>
<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>
>>> data = {
... 'form-TOTAL_FORMS': '4', # the number of forms rendered
... 'form-INITIAL_FORMS': '3', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-id': '2',
... 'form-0-name': 'Arthur Rimbaud',
... 'form-1-id': '1',
... 'form-1-name': 'Charles Baudelaire',
... 'form-2-id': '3',
... 'form-2-name': 'Paul Verlaine',
... 'form-3-name': 'Walt Whitman',
... 'form-3-DELETE': 'on',
... }
>>> formset = AuthorFormSet(data=data, queryset=qs)
>>> formset.is_valid()
True
# No objects were changed or saved so nothing will come back.
>>> formset.save()
[]
>>> for author in Author.objects.order_by('name'):
... print author.name
Arthur Rimbaud
Charles Baudelaire
Paul Verlaine
Let's edit a record to ensure save only returns that one record.
>>> data = {
... 'form-TOTAL_FORMS': '4', # the number of forms rendered
... 'form-INITIAL_FORMS': '3', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-id': '2',
... 'form-0-name': 'Walt Whitman',
... 'form-1-id': '1',
... 'form-1-name': 'Charles Baudelaire',
... 'form-2-id': '3',
... 'form-2-name': 'Paul Verlaine',
... 'form-3-name': '',
... 'form-3-DELETE': '',
... }
>>> formset = AuthorFormSet(data=data, queryset=qs)
>>> formset.is_valid()
True
# One record has changed.
>>> formset.save()
[<Author: Walt Whitman>]
Test the behavior of commit=False and save_m2m
>>> meeting = AuthorMeeting.objects.create(created=date.today())
>>> meeting.authors = Author.objects.all()
# create an Author instance to add to the meeting.
>>> new_author = Author.objects.create(name=u'John Steinbeck')
>>> AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, extra=1, can_delete=True)
>>> data = {
... 'form-TOTAL_FORMS': '2', # the number of forms rendered
... 'form-INITIAL_FORMS': '1', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-id': '1',
... 'form-0-name': '2nd Tuesday of the Week Meeting',
... 'form-0-authors': [2, 1, 3, 4],
... 'form-1-name': '',
... 'form-1-authors': '',
... 'form-1-DELETE': '',
... }
>>> formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
>>> formset.is_valid()
True
>>> instances = formset.save(commit=False)
>>> for instance in instances:
... instance.created = date.today()
... instance.save()
>>> formset.save_m2m()
>>> instances[0].authors.all()
[<Author: Charles Baudelaire>, <Author: John Steinbeck>, <Author: Paul Verlaine>, <Author: Walt Whitman>]
# delete the author we created to allow later tests to continue working.
>>> new_author.delete()
Test the behavior of max_num with model formsets. It should allow all existing
related objects/inlines for a given object to be displayed, but not allow
the creation of new inlines beyond max_num.
>>> qs = Author.objects.order_by('name')
>>> AuthorFormSet = modelformset_factory(Author, max_num=None, extra=3)
>>> formset = AuthorFormSet(queryset=qs)
>>> len(formset.forms)
6
>>> len(formset.extra_forms)
3
>>> AuthorFormSet = modelformset_factory(Author, max_num=4, extra=3)
>>> formset = AuthorFormSet(queryset=qs)
>>> len(formset.forms)
4
>>> len(formset.extra_forms)
1
>>> AuthorFormSet = modelformset_factory(Author, max_num=0, extra=3)
>>> formset = AuthorFormSet(queryset=qs)
>>> len(formset.forms)
3
>>> len(formset.extra_forms)
0
>>> AuthorFormSet = modelformset_factory(Author, max_num=None)
>>> formset = AuthorFormSet(queryset=qs)
>>> [x.name for x in formset.get_queryset()]
[u'Charles Baudelaire', u'Paul Verlaine', u'Walt Whitman']
>>> AuthorFormSet = modelformset_factory(Author, max_num=0)
>>> formset = AuthorFormSet(queryset=qs)
>>> [x.name for x in formset.get_queryset()]
[u'Charles Baudelaire', u'Paul Verlaine', u'Walt Whitman']
>>> AuthorFormSet = modelformset_factory(Author, max_num=4)
>>> formset = AuthorFormSet(queryset=qs)
>>> [x.name for x in formset.get_queryset()]
[u'Charles Baudelaire', u'Paul Verlaine', u'Walt Whitman']
# ModelForm with a custom save method in a formset ###########################
>>> class PoetForm(forms.ModelForm):
... def save(self, commit=True):
... # change the name to "Vladimir Mayakovsky" just to be a jerk.
... author = super(PoetForm, self).save(commit=False)
... author.name = u"Vladimir Mayakovsky"
... if commit:
... author.save()
... return author
>>> PoetFormSet = modelformset_factory(Poet, form=PoetForm)
>>> data = {
... 'form-TOTAL_FORMS': '3', # the number of forms rendered
... 'form-INITIAL_FORMS': '0', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-name': 'Walt Whitman',
... 'form-1-name': 'Charles Baudelaire',
... 'form-2-name': '',
... }
>>> qs = Poet.objects.all()
>>> formset = PoetFormSet(data=data, queryset=qs)
>>> formset.is_valid()
True
>>> formset.save()
[<Poet: Vladimir Mayakovsky>, <Poet: Vladimir Mayakovsky>]
# Model inheritance in model formsets ########################################
>>> BetterAuthorFormSet = modelformset_factory(BetterAuthor)
>>> formset = BetterAuthorFormSet()
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>
<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>
>>> data = {
... 'form-TOTAL_FORMS': '1', # the number of forms rendered
... 'form-INITIAL_FORMS': '0', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-author_ptr': '',
... 'form-0-name': 'Ernest Hemingway',
... 'form-0-write_speed': '10',
... }
>>> formset = BetterAuthorFormSet(data)
>>> formset.is_valid()
True
>>> formset.save()
[<BetterAuthor: Ernest Hemingway>]
>>> hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
>>> formset = BetterAuthorFormSet()
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>
<p><label for="id_form-0-write_speed">Write speed:</label> <input type="text" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="..." id="id_form-0-author_ptr" /></p>
<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>
<p><label for="id_form-1-write_speed">Write speed:</label> <input type="text" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>
>>> data = {
... 'form-TOTAL_FORMS': '2', # the number of forms rendered
... 'form-INITIAL_FORMS': '1', # the number of forms with initial data
... 'form-MAX_NUM_FORMS': '', # the max number of forms
... 'form-0-author_ptr': hemingway_id,
... 'form-0-name': 'Ernest Hemingway',
... 'form-0-write_speed': '10',
... 'form-1-author_ptr': '',
... 'form-1-name': '',
... 'form-1-write_speed': '',
... }
>>> formset = BetterAuthorFormSet(data)
>>> formset.is_valid()
True
>>> formset.save()
[]
# Inline Formsets ############################################################
We can also create a formset that is tied to a parent model. This is how the
admin system's edit inline functionality works.
>>> from django.forms.models import inlineformset_factory
>>> AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3)
>>> author = Author.objects.get(name='Charles Baudelaire')
>>> formset = AuthorBooksFormSet(instance=author)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>
<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>
<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>
>>> data = {
... 'book_set-TOTAL_FORMS': '3', # the number of forms rendered
... 'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
... 'book_set-MAX_NUM_FORMS': '', # the max number of forms
... 'book_set-0-title': 'Les Fleurs du Mal',
... 'book_set-1-title': '',
... 'book_set-2-title': '',
... }
>>> formset = AuthorBooksFormSet(data, instance=author)
>>> formset.is_valid()
True
>>> formset.save()
[<Book: Les Fleurs du Mal>]
>>> for book in author.book_set.all():
... print book.title
Les Fleurs du Mal
Now that we've added a book to Charles Baudelaire, let's try adding another
one. This time though, an edit form will be available for every existing
book.
>>> AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2)
>>> author = Author.objects.get(name='Charles Baudelaire')
>>> formset = AuthorBooksFormSet(instance=author)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>
<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>
<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>
>>> data = {
... 'book_set-TOTAL_FORMS': '3', # the number of forms rendered
... 'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
... 'book_set-MAX_NUM_FORMS': '', # the max number of forms
... 'book_set-0-id': '1',
... 'book_set-0-title': 'Les Fleurs du Mal',
... 'book_set-1-title': 'Les Paradis Artificiels',
... 'book_set-2-title': '',
... }
>>> formset = AuthorBooksFormSet(data, instance=author)
>>> formset.is_valid()
True
>>> formset.save()
[<Book: Les Paradis Artificiels>]
As you can see, 'Les Paradis Artificiels' is now a book belonging to Charles Baudelaire.
>>> for book in author.book_set.order_by('id'):
... print book.title
Les Fleurs du Mal
Les Paradis Artificiels
The save_as_new parameter lets you re-associate the data to a new instance.
This is used in the admin for save_as functionality.
>>> data = {
... 'book_set-TOTAL_FORMS': '3', # the number of forms rendered
... 'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
... 'book_set-MAX_NUM_FORMS': '', # the max number of forms
... 'book_set-0-id': '1',
... 'book_set-0-title': 'Les Fleurs du Mal',
... 'book_set-1-id': '2',
... 'book_set-1-title': 'Les Paradis Artificiels',
... 'book_set-2-title': '',
... }
>>> formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
>>> formset.is_valid()
True
>>> new_author = Author.objects.create(name='Charles Baudelaire')
>>> formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
>>> [book for book in formset.save() if book.author.pk == new_author.pk]
[<Book: Les Fleurs du Mal>, <Book: Les Paradis Artificiels>]
Test using a custom prefix on an inline formset.
>>> formset = AuthorBooksFormSet(prefix="test")
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>
<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>
Test inline formsets where the inline-edited object has a custom primary key that is not the fk to the parent object.
>>> AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1)
>>> formset = AuthorBooksFormSet2(instance=author)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input type="text" name="bookwithcustompk_set-0-my_pk" id="id_bookwithcustompk_set-0-my_pk" /></p>
<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>
>>> data = {
... 'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
... 'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
... 'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
... 'bookwithcustompk_set-0-my_pk': '77777',
... 'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
... }
>>> formset = AuthorBooksFormSet2(data, instance=author)
>>> formset.is_valid()
True
>>> formset.save()
[<BookWithCustomPK: 77777: Les Fleurs du Mal>]
>>> for book in author.bookwithcustompk_set.all():
... print book.title
Les Fleurs du Mal
Test inline formsets where the inline-edited object uses multi-table inheritance, thus
has a non AutoField yet auto-created primary key.
>>> AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1)
>>> formset = AuthorBooksFormSet3(instance=author)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>
<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>
>>> data = {
... 'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
... 'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
... 'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
... 'alternatebook_set-0-title': 'Flowers of Evil',
... 'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
... }
>>> formset = AuthorBooksFormSet3(data, instance=author)
>>> formset.is_valid()
True
>>> formset.save()
[<AlternateBook: Flowers of Evil - English translation of Les Fleurs du Mal>]
Test inline formsets where the inline-edited object has a unique_together constraint with a nullable member
>>> AuthorBooksFormSet4 = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2)
>>> data = {
... 'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
... 'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
... 'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
... 'bookwithoptionalalteditor_set-0-author': '1',
... 'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
... 'bookwithoptionalalteditor_set-1-author': '1',
... 'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
... }
>>> formset = AuthorBooksFormSet4(data, instance=author)
>>> formset.is_valid()
True
>>> formset.save()
[<BookWithOptionalAltEditor: Les Fleurs du Mal>, <BookWithOptionalAltEditor: Les Fleurs du Mal>]
# ModelForm with a custom save method in an inline formset ###################
>>> class PoemForm(forms.ModelForm):
... def save(self, commit=True):
... # change the name to "Brooklyn Bridge" just to be a jerk.
... poem = super(PoemForm, self).save(commit=False)
... poem.name = u"Brooklyn Bridge"
... if commit:
... poem.save()
... return poem
>>> PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm)
>>> data = {
... 'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
... 'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
... 'poem_set-MAX_NUM_FORMS': '', # the max number of forms
... 'poem_set-0-name': 'The Cloud in Trousers',
... 'poem_set-1-name': 'I',
... 'poem_set-2-name': '',
... }
>>> poet = Poet.objects.create(name='Vladimir Mayakovsky')
>>> formset = PoemFormSet(data=data, instance=poet)
>>> formset.is_valid()
True
>>> formset.save()
[<Poem: Brooklyn Bridge>, <Poem: Brooklyn Bridge>]
We can provide a custom queryset to our InlineFormSet:
>>> custom_qs = Book.objects.order_by('-title')
>>> formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="2" id="id_book_set-0-id" /></p>
<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="1" id="id_book_set-1-id" /></p>
<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="5" id="id_book_set-2-id" /></p>
<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>
<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>
>>> data = {
... 'book_set-TOTAL_FORMS': '5', # the number of forms rendered
... 'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
... 'book_set-MAX_NUM_FORMS': '', # the max number of forms
... 'book_set-0-id': '1',
... 'book_set-0-title': 'Les Fleurs du Mal',
... 'book_set-1-id': '2',
... 'book_set-1-title': 'Les Paradis Artificiels',
... 'book_set-2-id': '5',
... 'book_set-2-title': 'Flowers of Evil',
... 'book_set-3-title': 'Revue des deux mondes',
... 'book_set-4-title': '',
... }
>>> formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
>>> formset.is_valid()
True
>>> custom_qs = Book.objects.filter(title__startswith='F')
>>> formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="5" id="id_book_set-0-id" /></p>
<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>
<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>
>>> data = {
... 'book_set-TOTAL_FORMS': '3', # the number of forms rendered
... 'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
... 'book_set-MAX_NUM_FORMS': '', # the max number of forms
... 'book_set-0-id': '5',
... 'book_set-0-title': 'Flowers of Evil',
... 'book_set-1-title': 'Revue des deux mondes',
... 'book_set-2-title': '',
... }
>>> formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
>>> formset.is_valid()
True
# Test a custom primary key ###################################################
We need to ensure that it is displayed
>>> CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey)
>>> formset = CustomPrimaryKeyFormSet()
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>
<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>
# Custom primary keys with ForeignKey, OneToOneField and AutoField ############
>>> place = Place(name=u'Giordanos', city=u'Chicago')
>>> place.save()
>>> FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False)
>>> formset = FormSet(instance=place)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>
<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>
>>> data = {
... 'owner_set-TOTAL_FORMS': '2',
... 'owner_set-INITIAL_FORMS': '0',
... 'owner_set-MAX_NUM_FORMS': '',
... 'owner_set-0-auto_id': '',
... 'owner_set-0-name': u'Joe Perry',
... 'owner_set-1-auto_id': '',
... 'owner_set-1-name': '',
... }
>>> formset = FormSet(data, instance=place)
>>> formset.is_valid()
True
>>> formset.save()
[<Owner: Joe Perry at Giordanos>]
>>> formset = FormSet(instance=place)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="1" id="id_owner_set-0-auto_id" /></p>
<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>
<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>
>>> data = {
... 'owner_set-TOTAL_FORMS': '3',
... 'owner_set-INITIAL_FORMS': '1',
... 'owner_set-MAX_NUM_FORMS': '',
... 'owner_set-0-auto_id': u'1',
... 'owner_set-0-name': u'Joe Perry',
... 'owner_set-1-auto_id': '',
... 'owner_set-1-name': u'Jack Berry',
... 'owner_set-2-auto_id': '',
... 'owner_set-2-name': '',
... }
>>> formset = FormSet(data, instance=place)
>>> formset.is_valid()
True
>>> formset.save()
[<Owner: Jack Berry at Giordanos>]
# Ensure a custom primary key that is a ForeignKey or OneToOneField get rendered for the user to choose.
>>> FormSet = modelformset_factory(OwnerProfile)
>>> formset = FormSet()
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">
<option value="" selected="selected">---------</option>
<option value="1">Joe Perry at Giordanos</option>
<option value="2">Jack Berry at Giordanos</option>
</select></p>
<p><label for="id_form-0-age">Age:</label> <input type="text" name="form-0-age" id="id_form-0-age" /></p>
>>> owner = Owner.objects.get(name=u'Joe Perry')
>>> FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False)
>>> FormSet.max_num
1
>>> formset = FormSet(instance=owner)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="1" id="id_ownerprofile-0-owner" /></p>
>>> data = {
... 'ownerprofile-TOTAL_FORMS': '1',
... 'ownerprofile-INITIAL_FORMS': '0',
... 'ownerprofile-MAX_NUM_FORMS': '1',
... 'ownerprofile-0-owner': '',
... 'ownerprofile-0-age': u'54',
... }
>>> formset = FormSet(data, instance=owner)
>>> formset.is_valid()
True
>>> formset.save()
[<OwnerProfile: Joe Perry is 54>]
>>> formset = FormSet(instance=owner)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_ownerprofile-0-age">Age:</label> <input type="text" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" /><input type="hidden" name="ownerprofile-0-owner" value="1" id="id_ownerprofile-0-owner" /></p>
>>> data = {
... 'ownerprofile-TOTAL_FORMS': '1',
... 'ownerprofile-INITIAL_FORMS': '1',
... 'ownerprofile-MAX_NUM_FORMS': '1',
... 'ownerprofile-0-owner': u'1',
... 'ownerprofile-0-age': u'55',
... }
>>> formset = FormSet(data, instance=owner)
>>> formset.is_valid()
True
>>> formset.save()
[<OwnerProfile: Joe Perry is 55>]
# ForeignKey with unique=True should enforce max_num=1
>>> FormSet = inlineformset_factory(Place, Location, can_delete=False)
>>> FormSet.max_num
1
>>> formset = FormSet(instance=place)
>>> for form in formset.forms:
... print form.as_p()
<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>
<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>
# Foreign keys in parents ########################################
>>> from django.forms.models import _get_foreign_key
>>> type(_get_foreign_key(Restaurant, Owner))
<class 'django.db.models.fields.related.ForeignKey'>
>>> type(_get_foreign_key(MexicanRestaurant, Owner))
<class 'django.db.models.fields.related.ForeignKey'>
# unique/unique_together validation ###########################################
>>> FormSet = modelformset_factory(Product, extra=1)
>>> data = {
... 'form-TOTAL_FORMS': '1',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
... 'form-0-slug': 'car-red',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
True
>>> formset.save()
[<Product: car-red>]
>>> data = {
... 'form-TOTAL_FORMS': '1',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
... 'form-0-slug': 'car-red',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset.errors
[{'slug': [u'Product with this Slug already exists.']}]
# unique_together
>>> FormSet = modelformset_factory(Price, extra=1)
>>> data = {
... 'form-TOTAL_FORMS': '1',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
... 'form-0-price': u'12.00',
... 'form-0-quantity': '1',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
True
>>> formset.save()
[<Price: 1 for 12.00>]
>>> data = {
... 'form-TOTAL_FORMS': '1',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
... 'form-0-price': u'12.00',
... 'form-0-quantity': '1',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset.errors
[{'__all__': [u'Price with this Price and Quantity already exists.']}]
# unique_together with inlineformset_factory
# Also see bug #8882.
>>> repository = Repository.objects.create(name=u'Test Repo')
>>> FormSet = inlineformset_factory(Repository, Revision, extra=1)
>>> data = {
... 'revision_set-TOTAL_FORMS': '1',
... 'revision_set-INITIAL_FORMS': '0',
... 'revision_set-MAX_NUM_FORMS': '',
... 'revision_set-0-repository': repository.pk,
... 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
... 'revision_set-0-DELETE': '',
... }
>>> formset = FormSet(data, instance=repository)
>>> formset.is_valid()
True
>>> formset.save()
[<Revision: 146239817507f148d448db38840db7c3cbf47c76 (Test Repo)>]
# attempt to save the same revision against the same repo.
>>> data = {
... 'revision_set-TOTAL_FORMS': '1',
... 'revision_set-INITIAL_FORMS': '0',
... 'revision_set-MAX_NUM_FORMS': '',
... 'revision_set-0-repository': repository.pk,
... 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
... 'revision_set-0-DELETE': '',
... }
>>> formset = FormSet(data, instance=repository)
>>> formset.is_valid()
False
>>> formset.errors
[{'__all__': [u'Revision with this Repository and Revision already exists.']}]
# unique_together with inlineformset_factory with overridden form fields
# Also see #9494
>>> FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
>>> data = {
... 'revision_set-TOTAL_FORMS': '1',
... 'revision_set-INITIAL_FORMS': '0',
... 'revision_set-MAX_NUM_FORMS': '',
... 'revision_set-0-repository': repository.pk,
... 'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
... 'revision_set-0-DELETE': '',
... }
>>> formset = FormSet(data, instance=repository)
>>> formset.is_valid()
False
# Use of callable defaults (see bug #7975).
>>> person = Person.objects.create(name='Ringo')
>>> FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1)
>>> formset = FormSet(instance=person)
# Django will render a hidden field for model fields that have a callable
# default. This is required to ensure the value is tested for change correctly
# when determining which extra forms have changed and need to be saved.
>>> form = formset.forms[0] # this formset only has one form
>>> now = form.fields['date_joined'].initial()
>>> print form.as_p()
<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="..." id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="..." id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>
<p><label for="id_membership_set-0-karma">Karma:</label> <input type="text" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="1" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>
# test for validation with callable defaults. Validations rely on hidden fields
>>> data = {
... 'membership_set-TOTAL_FORMS': '1',
... 'membership_set-INITIAL_FORMS': '0',
... 'membership_set-MAX_NUM_FORMS': '',
... 'membership_set-0-date_joined': unicode(now.strftime('%Y-%m-%d %H:%M:%S')),
... 'initial-membership_set-0-date_joined': unicode(now.strftime('%Y-%m-%d %H:%M:%S')),
... 'membership_set-0-karma': '',
... }
>>> formset = FormSet(data, instance=person)
>>> formset.is_valid()
True
# now test for when the data changes
>>> one_day_later = now + datetime.timedelta(days=1)
>>> filled_data = {
... 'membership_set-TOTAL_FORMS': '1',
... 'membership_set-INITIAL_FORMS': '0',
... 'membership_set-MAX_NUM_FORMS': '',
... 'membership_set-0-date_joined': unicode(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
... 'initial-membership_set-0-date_joined': unicode(now.strftime('%Y-%m-%d %H:%M:%S')),
... 'membership_set-0-karma': '',
... }
>>> formset = FormSet(filled_data, instance=person)
>>> formset.is_valid()
False
# now test with split datetime fields
>>> class MembershipForm(forms.ModelForm):
... date_joined = forms.SplitDateTimeField(initial=now)
... class Meta:
... model = Membership
... def __init__(self, **kwargs):
... super(MembershipForm, self).__init__(**kwargs)
... self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
>>> FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1)
>>> data = {
... 'membership_set-TOTAL_FORMS': '1',
... 'membership_set-INITIAL_FORMS': '0',
... 'membership_set-MAX_NUM_FORMS': '',
... 'membership_set-0-date_joined_0': unicode(now.strftime('%Y-%m-%d')),
... 'membership_set-0-date_joined_1': unicode(now.strftime('%H:%M:%S')),
... 'initial-membership_set-0-date_joined': unicode(now.strftime('%Y-%m-%d %H:%M:%S')),
... 'membership_set-0-karma': '',
... }
>>> formset = FormSet(data, instance=person)
>>> formset.is_valid()
True
# inlineformset_factory tests with fk having null=True. see #9462.
# create some data that will exhibit the issue
>>> team = Team.objects.create(name=u"Red Vipers")
>>> Player(name="Timmy").save()
>>> Player(name="Bobby", team=team).save()
>>> PlayerInlineFormSet = inlineformset_factory(Team, Player)
>>> formset = PlayerInlineFormSet()
>>> formset.get_queryset()
[]
>>> formset = PlayerInlineFormSet(instance=team)
>>> formset.get_queryset()
[<Player: Bobby>]
# a formset for a Model that has a custom primary key that still needs to be
# added to the formset automatically
>>> FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
>>> sorted(FormSet().forms[0].fields.keys())
['restaurant', 'tacos_are_yummy']
# Prevent duplicates from within the same formset
>>> FormSet = modelformset_factory(Product, extra=2)
>>> data = {
... 'form-TOTAL_FORMS': 2,
... 'form-INITIAL_FORMS': 0,
... 'form-MAX_NUM_FORMS': '',
... 'form-0-slug': 'red_car',
... 'form-1-slug': 'red_car',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for slug.']
>>> FormSet = modelformset_factory(Price, extra=2)
>>> data = {
... 'form-TOTAL_FORMS': 2,
... 'form-INITIAL_FORMS': 0,
... 'form-MAX_NUM_FORMS': '',
... 'form-0-price': '25',
... 'form-0-quantity': '7',
... 'form-1-price': '25',
... 'form-1-quantity': '7',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for price and quantity, which must be unique.']
# Only the price field is specified, this should skip any unique checks since
# the unique_together is not fulfilled. This will fail with a KeyError if broken.
>>> FormSet = modelformset_factory(Price, fields=("price",), extra=2)
>>> data = {
... 'form-TOTAL_FORMS': '2',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
... 'form-0-price': '24',
... 'form-1-price': '24',
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
True
>>> FormSet = inlineformset_factory(Author, Book, extra=0)
>>> author = Author.objects.order_by('id')[0]
>>> book_ids = author.book_set.values_list('id', flat=True)
>>> data = {
... 'book_set-TOTAL_FORMS': '2',
... 'book_set-INITIAL_FORMS': '2',
... 'book_set-MAX_NUM_FORMS': '',
...
... 'book_set-0-title': 'The 2008 Election',
... 'book_set-0-author': str(author.id),
... 'book_set-0-id': str(book_ids[0]),
...
... 'book_set-1-title': 'The 2008 Election',
... 'book_set-1-author': str(author.id),
... 'book_set-1-id': str(book_ids[1]),
... }
>>> formset = FormSet(data=data, instance=author)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for title.']
>>> formset.errors
[{}, {'__all__': u'Please correct the duplicate values below.'}]
>>> FormSet = modelformset_factory(Post, extra=2)
>>> data = {
... 'form-TOTAL_FORMS': '2',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
...
... 'form-0-title': 'blah',
... 'form-0-slug': 'Morning',
... 'form-0-subtitle': 'foo',
... 'form-0-posted': '2009-01-01',
... 'form-1-title': 'blah',
... 'form-1-slug': 'Morning in Prague',
... 'form-1-subtitle': 'rawr',
... 'form-1-posted': '2009-01-01'
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for title which must be unique for the date in posted.']
>>> formset.errors
[{}, {'__all__': u'Please correct the duplicate values below.'}]
>>> data = {
... 'form-TOTAL_FORMS': '2',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
...
... 'form-0-title': 'foo',
... 'form-0-slug': 'Morning in Prague',
... 'form-0-subtitle': 'foo',
... 'form-0-posted': '2009-01-01',
... 'form-1-title': 'blah',
... 'form-1-slug': 'Morning in Prague',
... 'form-1-subtitle': 'rawr',
... 'form-1-posted': '2009-08-02'
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for slug which must be unique for the year in posted.']
>>> data = {
... 'form-TOTAL_FORMS': '2',
... 'form-INITIAL_FORMS': '0',
... 'form-MAX_NUM_FORMS': '',
...
... 'form-0-title': 'foo',
... 'form-0-slug': 'Morning in Prague',
... 'form-0-subtitle': 'rawr',
... 'form-0-posted': '2008-08-01',
... 'form-1-title': 'blah',
... 'form-1-slug': 'Prague',
... 'form-1-subtitle': 'rawr',
... 'form-1-posted': '2009-08-02'
... }
>>> formset = FormSet(data)
>>> formset.is_valid()
False
>>> formset._non_form_errors
[u'Please correct the duplicate data for subtitle which must be unique for the month in posted.']
"""}
| {
"content_hash": "82a85b3d5cae7b1ffe6ab27b48baad79",
"timestamp": "",
"source": "github",
"line_count": 1230,
"max_line_length": 370,
"avg_line_length": 40.99593495934959,
"alnum_prop": 0.643629152206247,
"repo_name": "alex/django-old",
"id": "1665f62416b0bb4d74830aed1ba95a83311702ac",
"size": "50425",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/modeltests/model_formsets/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "91750"
},
{
"name": "Python",
"bytes": "6425033"
},
{
"name": "Shell",
"bytes": "3519"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.