text stringlengths 4 1.02M | meta dict |
|---|---|
"""Python 'uu_codec' Codec - UU content transfer encoding.
This codec de/encodes from bytes to bytes and is therefore usable with
bytes.transform() and bytes.untransform().
Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs
import binascii
from io import BytesIO
### Codec APIs
def uu_encode(input, errors='strict', filename='<data>', mode=0o666):
    """Uuencode *input* bytes.

    Returns a ``(encoded_bytes, consumed_length)`` tuple.  *filename* and
    *mode* (masked to the low 9 permission bits) go into the "begin" line.
    Only ``errors='strict'`` is supported.
    """
    assert errors == 'strict'
    src = BytesIO(input)
    out = bytearray()
    # Header line, e.g. b"begin 666 <data>\n".
    out += ('begin %o %s\n' % (mode & 0o777, filename)).encode('ascii')
    # Body: uuencode in the standard 45-byte chunks.
    while True:
        chunk = src.read(45)
        if not chunk:
            break
        out += binascii.b2a_uu(chunk)
    # Zero-length line (a single space) followed by the end marker.
    out += b' \nend\n'
    return (bytes(out), len(input))
def uu_decode(input, errors='strict'):
    """Uudecode *input* bytes.

    Returns a ``(decoded_bytes, consumed_length)`` tuple.  Only
    ``errors='strict'`` is supported.

    Raises ValueError if no "begin" line is found or the data ends
    before an "end" line.
    """
    assert errors == 'strict'
    infile = BytesIO(input)
    outfile = BytesIO()
    readline = infile.readline
    write = outfile.write
    # Find start of encoded data
    while 1:
        s = readline()
        if not s:
            raise ValueError('Missing "begin" line in input data')
        if s[:5] == b'begin':
            break
    # Decode
    while True:
        s = readline()
        if not s or s == b'end\n':
            break
        try:
            data = binascii.a2b_uu(s)
        except binascii.Error as v:
            # Workaround for broken uuencoders by /Fredrik Lundh:
            # recompute the number of encoded characters from the length
            # byte and retry on the truncated line.
            # Bug fix: on Python 3, indexing bytes yields an int, so the
            # original ord(s[0]) raised TypeError, and true division
            # produced a float that cannot be used as a slice index.
            # Use s[0] directly and floor division (as in CPython's
            # Lib/encodings/uu_codec.py).
            nbytes = (((s[0] - 32) & 63) * 4 + 5) // 3
            data = binascii.a2b_uu(s[:nbytes])
            #sys.stderr.write("Warning: %s\n" % str(v))
        write(data)
    if not s:
        raise ValueError('Truncated input data')
    return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
    """Stateless codec that delegates to the module-level uu helpers."""

    def encode(self, input, errors='strict'):
        # Whole-message encode: (encoded_bytes, consumed_length).
        return uu_encode(input, errors)

    def decode(self, input, errors='strict'):
        # Whole-message decode: (decoded_bytes, consumed_length).
        return uu_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; each call encodes a complete uu payload."""

    def encode(self, input, final=False):
        data, _consumed = uu_encode(input, self.errors)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each call decodes a complete uu payload."""

    def decode(self, input, final=False):
        data, _consumed = uu_decode(input, self.errors)
        return data
class StreamWriter(Codec, codecs.StreamWriter):
    # The uu transform is bytes-to-bytes, so the stream buffer holds
    # bytes rather than str.
    charbuffertype = bytes
class StreamReader(Codec, codecs.StreamReader):
    # The uu transform is bytes-to-bytes, so the stream buffer holds
    # bytes rather than str.
    charbuffertype = bytes
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for the 'uu' codec."""
    entry = dict(
        name='uu',
        encode=uu_encode,
        decode=uu_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
        # uu transforms bytes, not text.
        _is_text_encoding=False,
    )
    return codecs.CodecInfo(**entry)
| {
"content_hash": "ed2f972ced191ca60b4874ea64dc1a91",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 71,
"avg_line_length": 27.96,
"alnum_prop": 0.6351931330472103,
"repo_name": "timm/timmnix",
"id": "e3269e40cd306e1cb42586af87e7cde2d2115a48",
"size": "2796",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pypy3-v5.5.0-linux64/lib-python/3/encodings/uu_codec.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1641"
},
{
"name": "Batchfile",
"bytes": "1234"
},
{
"name": "C",
"bytes": "436685"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "Common Lisp",
"bytes": "4"
},
{
"name": "Emacs Lisp",
"bytes": "290698"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Makefile",
"bytes": "1681"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "1540"
},
{
"name": "Prolog",
"bytes": "14301"
},
{
"name": "Python",
"bytes": "21267592"
},
{
"name": "Roff",
"bytes": "21080"
},
{
"name": "Shell",
"bytes": "27687"
},
{
"name": "TeX",
"bytes": "3052861"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Prepare JSON for all files in current directory."""
import argparse
import os
import json
def main():
    """Write a JSON manifest of every regular file in a directory.

    Command line: ``prepare_json.py DIRECTORY OUTPUT``.  OUTPUT receives a
    JSON list of ``{"path": <absolute path>, "name": <basename>}`` records,
    one per regular file directly inside DIRECTORY.
    """
    # Bug fix: ArgumentParser's first positional parameter is `prog`, not
    # the description — pass it by keyword.
    parser = argparse.ArgumentParser(
        description="Prepare JSON for all files in specified directory")
    parser.add_argument("directory", help="directory with sources")
    parser.add_argument("output", help="output file")
    args = parser.parse_args()
    path = args.directory
    # Bug fix: build paths with os.path.join instead of `path + f`, which
    # produced wrong paths whenever `path` lacked a trailing separator
    # (the isfile() check already used os.path.join).
    files = [os.path.join(path, f) for f in os.listdir(path)
             if os.path.isfile(os.path.join(path, f))]
    source_files = [{
        "path": os.path.abspath(f),
        "name": os.path.basename(f),
    } for f in files]
    with open(args.output, "w") as out:
        out.write(json.dumps(source_files))
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| {
"content_hash": "2ec3f0a9792f93871610fd93c55f3217",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.6791907514450867,
"repo_name": "Timus1712/source-code-comparator",
"id": "6b27b8c7fa1363268ab4cdb07403963853973b84",
"size": "714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assets/prepare_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3945"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``artifact.guard_id`` and reword ``artifact.key_id`` help text."""

    dependencies = [
        ('adventure', '0009_adventure_slug'),
    ]
    operations = [
        # New nullable field: links a bound monster artifact to its guard.
        migrations.AddField(
            model_name='artifact',
            name='guard_id',
            field=models.IntegerField(help_text='If a bound monster, the ID of a monster that prevents the player from freeing it', null=True),
        ),
        # Schema-neutral change: only the help_text of key_id is updated.
        migrations.AlterField(
            model_name='artifact',
            name='key_id',
            field=models.IntegerField(help_text='If a container, door, or bound monster, the artifact ID of the key that opens it', null=True),
        ),
    ]
| {
"content_hash": "8936930ff6aea1b593b9cf060c954344",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 143,
"avg_line_length": 31.26086956521739,
"alnum_prop": 0.6147426981919333,
"repo_name": "kdechant/eamon",
"id": "03a26d4a96d15dd3c565e0c03d18886822d23721",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adventure/migrations/0010_auto_20160327_2227.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "35351"
},
{
"name": "JavaScript",
"bytes": "22343"
},
{
"name": "Python",
"bytes": "223678"
},
{
"name": "SCSS",
"bytes": "14127"
},
{
"name": "TypeScript",
"bytes": "1133700"
}
],
"symlink_target": ""
} |
import importlib
import sys
import warnings

# Deprecation shim: emit a warning, then replace this module object in
# sys.modules with the relocated implementation so that importing
# stanza.research.progress transparently yields stanza.monitoring.progress.
warnings.warn('progress has been moved from stanza.research to stanza.monitoring; the module in research is deprecated.')
# The relative module path ('...monitoring.progress') is resolved against
# this module's own package, anchored by __name__.
sys.modules[__name__] = importlib.import_module('...monitoring.progress', __name__)
| {
"content_hash": "393ed632ca91d953a38bcfcdda8ab84b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 121,
"avg_line_length": 36,
"alnum_prop": 0.7698412698412699,
"repo_name": "arunchaganty/aeschines",
"id": "1d45ddeb615667750f349d036cc1c962fceda16b",
"size": "252",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third-party/stanza/stanza/research/progress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "HTML",
"bytes": "2114"
},
{
"name": "JavaScript",
"bytes": "10159"
},
{
"name": "Jupyter Notebook",
"bytes": "551699"
},
{
"name": "Python",
"bytes": "142734"
},
{
"name": "Shell",
"bytes": "5610"
}
],
"symlink_target": ""
} |
import bond
import time
class Ring(object):
    """A five- or six-membered ring of bonded atoms.

    Member atoms are exposed as attributes ``atom1`` .. ``atom6``
    (``atom6`` is None for a five-membered ring).  Constructing a Ring
    sets each member atom's ``ring`` flag to True.
    """

    # Class-level defaults; __init__ shadows them with instance values.
    ring_type = 0      # number of atoms in the ring: 5 or 6 (0 = unset)
    improper = False
    fused = False
    thio = False
    atom1 = ""
    atom2 = ""
    atom3 = ""
    atom4 = ""
    atom5 = ""
    atom6 = ""

    def __init__(self, a1, a2, a3, a4, a5, a6=None):
        """Store the member atoms and flag each as belonging to a ring.

        a1..a5 are required; pass a6 to build a six-membered ring.
        """
        self.atom1 = a1
        self.atom2 = a2
        self.atom3 = a3
        self.atom4 = a4
        self.atom5 = a5
        self.atom6 = a6
        for atom in (a1, a2, a3, a4, a5):
            atom.ring = True
        self.ring_type = 5
        # Idiom fix: compare against None with "is not", not "!=".
        if a6 is not None:
            self.ring_type = 6
            a6.ring = True

    def list(self):
        """Return the member atoms as a list (5 or 6 entries, ring order)."""
        members = [self.atom1, self.atom2, self.atom3, self.atom4, self.atom5]
        if self.atom6 is not None:
            members.append(self.atom6)
        return members

    def list_type(self):
        """Return each member atom's ``atom_type``, in ring order."""
        return [atom.atom_type for atom in self.list()]
def create_rings(atoms):
    """ Creates Ring object through the use of following each atom's bonds
    Keyword Arguments:
    atoms - The list of atom objects to find the rings
    """
    # Depth-first walk of bond chains, up to seven atoms from each start
    # atom; when a chain closes back on its start after 5 or 6 atoms, a
    # Ring is recorded.  Assumes each atom has `ring` (bool) and
    # `atom_bonds` (list of bonded atoms) attributes -- TODO confirm.
    ring = []  # NOTE(review): never used; dead variable.
    ringlist = []
    for i in range(len(atoms)):
        layer1 = atoms[i]
        if layer1.ring:
            # Skip atoms already assigned to a ring by an earlier walk.
            continue
        # NOTE(review): `path` only ever grows -- atoms appended while
        # exploring one branch are never removed before the next sibling
        # branch, so later branches are pruned against abandoned ones.
        # Confirm this is intended pruning rather than missing backtracking.
        path = []
        path.append(layer1)
        for j in range(len(layer1.atom_bonds)):
            layer2 = layer1.atom_bonds[j]
            if layer2 in path:
                continue
            path.append(layer2)
            for k in range(len(layer2.atom_bonds)):
                layer3 = layer2.atom_bonds[k]
                if layer3 in path:
                    continue
                path.append(layer3)
                for l in range(len(layer3.atom_bonds)):
                    layer4 = layer3.atom_bonds[l]
                    if layer4 in path:
                        continue
                    path.append(layer4)
                    for m in range(len(layer4.atom_bonds)):
                        layer5 = layer4.atom_bonds[m]
                        if layer5 in path:
                            continue
                        # NOTE(review): layer5 is not appended to `path`
                        # here, unlike the shallower layers -- verify.
                        for n in range(len(layer5.atom_bonds)):
                            layer6 = layer5.atom_bonds[n]
                            if layer6 == path[0]:
                                # Chain closed after 5 atoms: 5-membered ring.
                                ringlist.append(Ring(layer1,layer2,layer3,layer4,layer5))
                            if layer6 in path:
                                continue
                            path.append(layer6)
                            for o in range(len(layer6.atom_bonds)):
                                layer7 = layer6.atom_bonds[o]
                                if layer7 == path[0]:
                                    # Chain closed after 6 atoms: 6-membered ring.
                                    ringlist.append(Ring(layer1,layer2,layer3,layer4,layer5,layer6))
    return ringlist
| {
"content_hash": "91c9d52e624b4e0050e22a3b36706f93",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 100,
"avg_line_length": 31.137254901960784,
"alnum_prop": 0.4782745591939547,
"repo_name": "sipjca/cmlparser_py",
"id": "c69d128bfea4e555276147d35ef8137b45e37190",
"size": "3176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "120112"
}
],
"symlink_target": ""
} |
"""
Hacer un conversor de gradación
a. De Celsius a Fahrenheit y de Fahrenheit a Celsius
b. Escribir una función que genere aleatoriamente valores de temperatura entre -5 y 40ºC
c. Escribir una función que genere una tupla con 1200 valores de temperatura
d. Considerar que cada 100 datos del apartado C es un mes. Dar una media de temperaturas por meses
e. Definir una función que traduzca la tupla de Celsius a Fahrenheit y viceversa
"""
from random import randint
def conversor(grados, de_f_a_c=False):
    """Convert a temperature between Celsius and Fahrenheit.

    By default converts Celsius -> Fahrenheit.  Pass ``de_f_a_c=True``
    for the reverse (Fahrenheit -> Celsius) conversion.
    """
    if not de_f_a_c:
        return grados * 1.8 + 32
    return (grados - 32) / 1.8
def rand_temp(mes=0):
    """Return a random temperature plausible for month number *mes*.

    December (12), January (1), February (2): range [-10, 10]
    March-May (3-5) and October-November (10-11): range [3, 25]
    June-September (6-9): range [20, 40]
    Any other value: range [-5, 40]
    """
    if mes <= 2 or mes >= 12:
        return randint(-10, 10)
    if 3 <= mes <= 5 or 10 <= mes <= 11:
        return randint(3, 25)
    if 6 <= mes <= 9:
        return randint(20, 40)
    return randint(-5, 40)
def gen_tem():
    """Return a tuple of 1200 random temperatures, 100 per month (1-12)."""
    # Index 0-99 -> month 1, 100-199 -> month 2, ..., 1100-1199 -> month 12.
    return tuple(rand_temp(i // 100 + 1) for i in range(1200))
def subdivide_gen():
    """Return the 1200 generated temperatures split into 12 chunks of 100.

    Bug fix: the original evaluated ``gen_tem()`` once per slice plus once
    more for the length, so every 100-element chunk came from a *different*
    randomly generated series (and the series was regenerated 13 times).
    Generate the series once and slice it.
    """
    temps = gen_tem()
    return [temps[i:i + 100] for i in range(0, len(temps), 100)]
def media_mes():
    """Return a tuple with the mean temperature of each month."""
    monthly = subdivide_gen()
    return tuple(sum(chunk) / len(chunk) for chunk in monthly)
def con_tuple(tupla, de_f_a_c=False):
    """Convert every temperature in *tupla*, returning a new tuple.

    By default converts Celsius -> Fahrenheit; ``de_f_a_c=True`` converts
    Fahrenheit -> Celsius.

    Bug fix: the original did ``ret += conversor(elem, de_f_a_c)``, which
    tries to concatenate a bare float onto a tuple and raises TypeError on
    any non-empty input.  The converted value must be wrapped in a tuple
    (the ``value,`` idiom used elsewhere in this file).
    """
    return tuple(conversor(elem, de_f_a_c) for elem in tupla)
# Demo output: print the per-month mean temperatures on import/run.
print(media_mes())
| {
"content_hash": "97b7e925cce237027ccd83a0f1beb8cf",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 102,
"avg_line_length": 30.486486486486488,
"alnum_prop": 0.625,
"repo_name": "IhToN/DAW1-PRG",
"id": "08de032aa5b754a3fe6b55ffb98bd8f0bf38c2ca",
"size": "2266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ejercicios/PrimTrim/Ejercicio35.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "276667"
}
],
"symlink_target": ""
} |
import unittest
import mock
def _make_credentials():
    # Build a Mock constrained to the google-auth Credentials interface so
    # unexpected attribute access fails loudly in tests.
    import google.auth.credentials
    return mock.Mock(spec=google.auth.credentials.Credentials)
class TestGAXClient(unittest.TestCase):
    """Tests for google.cloud.vision._gax._GAPICVisionAPI.

    The GAPIC ImageAnnotatorClient is patched throughout so no network
    or credentials are needed.
    """

    def _get_target_class(self):
        # Imported lazily so collecting these tests does not require the
        # _gax extra to be importable.
        from google.cloud.vision._gax import _GAPICVisionAPI
        return _GAPICVisionAPI

    def _make_one(self, *args, **kwargs):
        # Instantiate the class under test.
        return self._get_target_class()(*args, **kwargs)

    def test_ctor(self):
        client = mock.Mock(
            _credentials=_make_credentials(),
            spec=['_credentials'],
        )
        # Patch out the real GAPIC client so the ctor makes no network calls.
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            api = self._make_one(client)
        self.assertIs(api._client, client)

    def test_gapic_credentials(self):
        from google.cloud.gapic.vision.v1.image_annotator_client import (
            ImageAnnotatorClient)
        from google.cloud.vision import Client
        # Mock the GAPIC ImageAnnotatorClient, whose arguments we
        # want to check.
        with mock.patch.object(ImageAnnotatorClient, '__init__') as iac:
            iac.return_value = None
            # Create the GAX client.
            credentials = _make_credentials()
            client = Client(credentials=credentials, project='foo')
            self._make_one(client=client)
        # Assert that the GAPIC constructor was called once, and
        # that the credentials were sent.
        iac.assert_called_once()
        _, _, kwargs = iac.mock_calls[0]
        self.assertIs(kwargs['credentials'], credentials)

    def test_kwarg_lib_name(self):
        from google.cloud.gapic.vision.v1.image_annotator_client import (
            ImageAnnotatorClient)
        from google.cloud.vision import __version__
        from google.cloud.vision import Client
        # Mock the GAPIC ImageAnnotatorClient, whose arguments we
        # want to check.
        with mock.patch.object(ImageAnnotatorClient, '__init__') as iac:
            iac.return_value = None
            # Create the GAX client.
            client = Client(credentials=_make_credentials(), project='foo')
            self._make_one(client=client)
        # Assert that the GAPIC constructor was called once, and
        # that lib_name and lib_version were sent.
        iac.assert_called_once()
        _, _, kwargs = iac.mock_calls[0]
        self.assertEqual(kwargs['lib_name'], 'gccl')
        self.assertEqual(kwargs['lib_version'], __version__)

    def test_annotation(self):
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image
        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)
        # Stub the annotator so batch_annotate_images yields one canned
        # response element.
        mock_response = {
            'batch_annotate_images.return_value':
                mock.Mock(responses=['mock response data']),
        }
        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'], **mock_response)
        # Verify the response payload is funneled into Annotations.from_pb.
        with mock.patch('google.cloud.vision._gax.Annotations') as mock_anno:
            images = ((image, [feature]),)
            gax_api.annotate(images)
            mock_anno.from_pb.assert_called_with('mock response data')
        gax_api._annotator_client.batch_annotate_images.assert_called()

    def test_annotate_no_requests(self):
        client = mock.Mock(spec_set=['_credentials'])
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)
        # With nothing to annotate, no RPC should be issued.
        response = gax_api.annotate()
        self.assertEqual(response, [])
        gax_api._annotator_client.batch_annotate_images.assert_not_called()

    def test_annotate_no_results(self):
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image
        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)
        # Annotator returns an empty responses list.
        mock_response = {
            'batch_annotate_images.return_value': mock.Mock(responses=[]),
        }
        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'], **mock_response)
        with mock.patch('google.cloud.vision._gax.Annotations'):
            images = ((image, [feature]),)
            response = gax_api.annotate(images)
        self.assertEqual(len(response), 0)
        self.assertIsInstance(response, list)
        gax_api._annotator_client.batch_annotate_images.assert_called()

    def test_annotate_multiple_results(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.annotations import Annotations
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image
        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)
        # Two protobuf responses -> two Annotations objects expected.
        responses = [
            image_annotator_pb2.AnnotateImageResponse(),
            image_annotator_pb2.AnnotateImageResponse(),
        ]
        response = image_annotator_pb2.BatchAnnotateImagesResponse(
            responses=responses)
        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'])
        gax_api._annotator_client.batch_annotate_images.return_value = response
        images = ((image, [feature]),)
        responses = gax_api.annotate(images)
        self.assertEqual(len(responses), 2)
        self.assertIsInstance(responses[0], Annotations)
        self.assertIsInstance(responses[1], Annotations)
        gax_api._annotator_client.batch_annotate_images.assert_called()

    def test_annotate_with_pb_requests_results(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.annotations import Annotations
        client = mock.Mock(spec_set=['_credentials'])
        # Build a fully protobuf-level request (requests_pb code path).
        feature_type = image_annotator_pb2.Feature.CROP_HINTS
        feature = image_annotator_pb2.Feature(type=feature_type, max_results=2)
        image_content = b'abc 1 2 3'
        image = image_annotator_pb2.Image(content=image_content)
        aspect_ratios = [1.3333, 1.7777]
        crop_hints_params = image_annotator_pb2.CropHintsParams(
            aspect_ratios=aspect_ratios)
        image_context = image_annotator_pb2.ImageContext(
            crop_hints_params=crop_hints_params)
        request = image_annotator_pb2.AnnotateImageRequest(
            image=image, features=[feature], image_context=image_context)
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)
        responses = [
            image_annotator_pb2.AnnotateImageResponse(),
            image_annotator_pb2.AnnotateImageResponse(),
        ]
        response = image_annotator_pb2.BatchAnnotateImagesResponse(
            responses=responses)
        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'])
        gax_api._annotator_client.batch_annotate_images.return_value = response
        responses = gax_api.annotate(requests_pb=[request])
        self.assertEqual(len(responses), 2)
        for annotation in responses:
            self.assertIsInstance(annotation, Annotations)
        gax_api._annotator_client.batch_annotate_images.assert_called()
class Test__to_gapic_feature(unittest.TestCase):
    """Unit tests for the _to_gapic_feature conversion helper."""

    def _call_fut(self, feature):
        # "fut" == function under test; imported lazily.
        from google.cloud.vision._gax import _to_gapic_feature
        return _to_gapic_feature(feature)

    def test__to_gapic_feature(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes

        domain_feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        converted = self._call_fut(domain_feature)
        # LABEL_DETECTION maps to protobuf enum value 4.
        self.assertIsInstance(converted, image_annotator_pb2.Feature)
        self.assertEqual(converted.type, 4)
        self.assertEqual(converted.max_results, 5)
class Test__to_gapic_image(unittest.TestCase):
    """Unit tests for the _to_gapic_image conversion helper."""

    def _call_fut(self, image):
        # "fut" == function under test; imported lazily.
        from google.cloud.vision._gax import _to_gapic_image
        return _to_gapic_image(image)

    def test__to_gapic_image_content(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.image import Image

        content = b'abc 1 2 3'
        converted = self._call_fut(Image(object(), content=content))
        self.assertIsInstance(converted, image_annotator_pb2.Image)
        self.assertEqual(converted.content, content)

    def test__to_gapic_gcs_image_uri(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.image import Image

        uri = 'gs://1234/34.jpg'
        converted = self._call_fut(Image(object(), source_uri=uri))
        # gs:// URIs land in the gcs_image_uri field of the source.
        self.assertIsInstance(converted, image_annotator_pb2.Image)
        self.assertEqual(converted.source.gcs_image_uri, uri)

    def test__to_gapic_image_uri(self):
        from google.cloud.proto.vision.v1 import image_annotator_pb2
        from google.cloud.vision.image import Image

        uri = 'http://1234/34.jpg'
        converted = self._call_fut(Image(object(), source_uri=uri))
        # http:// URIs land in the generic image_uri field.
        self.assertIsInstance(converted, image_annotator_pb2.Image)
        self.assertEqual(converted.source.image_uri, uri)

    def test__to_gapic_invalid_image_uri(self):
        from google.cloud.vision.image import Image

        # Unsupported scheme (ftp) must be rejected.
        with self.assertRaises(ValueError):
            self._call_fut(Image(object(), source_uri='ftp://1234/34.jpg'))

    def test__to_gapic_with_empty_image(self):
        # Neither content nor source set: conversion must fail.
        empty = mock.Mock(
            content=None, source=None, spec=['content', 'source'])
        with self.assertRaises(ValueError):
            self._call_fut(empty)
| {
"content_hash": "a71b84bd309d20fe2f43fe9eaa76dce1",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 39.53003533568904,
"alnum_prop": 0.637704478412443,
"repo_name": "daspecster/google-cloud-python",
"id": "b2c0ea5ab430385ff1bdbe09da3a169682fa09c5",
"size": "11763",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vision/unit_tests/test__gax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "4033334"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gio, Gtk
from ScientificProjects.Config import read_config, write_config
class PreferencesDialog(Gtk.Dialog):
    """Modal dialog for editing database connection settings.

    Values are loaded from ``config_file_name`` on construction
    (see read_in_config) and written back via save_config.
    """

    def __init__(self, parent, config_file_name):
        # Path of the config file read on open and written on save.
        self.config_file_name = config_file_name
        Gtk.Dialog.__init__(self, 'Preferences', parent, 0,
                            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                             Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_default_size(150, 100)
        self.set_border_width(6)
        label = Gtk.Label("Database settings")
        box = self.get_content_area()
        box.add(label)
        box.set_spacing(15)
        # Two homogeneous columns: field labels on the left, inputs right.
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
        box.add(hbox)
        vbox_left = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
        vbox_right = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
        vbox_left.set_homogeneous(True)
        vbox_right.set_homogeneous(True)
        hbox.pack_start(vbox_left, False, True, 0)
        hbox.pack_start(vbox_right, True, True, 0)
        # Database name row.
        label = Gtk.Label("name")
        label.set_halign(Gtk.Align.END)
        self.db_name = Gtk.Entry()
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_name, True, True, 0)
        # Backend (driver) row.
        label = Gtk.Label("backend")
        label.set_halign(Gtk.Align.END)
        self.db_backend = Gtk.Entry()
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_backend, True, True, 0)
        # Host row.
        label = Gtk.Label("host")
        label.set_halign(Gtk.Align.END)
        self.db_host = Gtk.Entry()
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_host, True, True, 0)
        # Port row: integer spin button, 0..100000.
        label = Gtk.Label("port")
        label.set_halign(Gtk.Align.END)
        self.db_port = Gtk.SpinButton()
        self.db_port.set_digits(0)
        self.db_port.set_range(0, 100000)
        self.db_port.set_increments(1, 10);
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_port, True, True, 0)
        # User row.
        label = Gtk.Label("user")
        label.set_halign(Gtk.Align.END)
        self.db_user = Gtk.Entry()
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_user, True, True, 0)
        # Password row (masked input).
        label = Gtk.Label("password")
        label.set_halign(Gtk.Align.END)
        self.db_password = Gtk.Entry()
        self.db_password.set_visibility(False)
        vbox_left.pack_start(label, True, True, 0)
        vbox_right.pack_start(self.db_password, True, True, 0)
        self.show_all()
        # Last config loaded from disk; None when unreadable/invalid.
        self.config = None
        self.read_in_config()

    def read_in_config(self):
        """Populate the entry widgets from the config file, if readable."""
        try:
            self.config = read_config(self.config_file_name)
            self.db_name.set_text(self.config['db_name'])
            self.db_backend.set_text(self.config['backend'])
            self.db_host.set_text(self.config['host'])
            port = self.config['port']
            try:
                port = int(port)
                self.config['port'] = port
            except ValueError:
                # Non-numeric port string: normalise '' to 0, otherwise
                # keep the original value so save_config can compare it.
                if self.config['port'] == '':
                    self.config['port'] = 0
                pass
            try:
                self.db_port.set_value(port)
            except TypeError:
                # `port` may still be a non-numeric string here; leave
                # the spin button at its default.
                pass
            self.db_user.set_text(self.config['user'])
            self.db_password.set_text(self.config['password'])
        except (IOError, ValueError):
            # Missing or malformed config file: start with empty fields.
            self.config = None

    def save_config(self):
        """Write the settings back if changed; return True when written."""
        config = {'db_name': self.db_name.get_text(), 'backend': self.db_backend.get_text(),
                  'host': self.db_host.get_text(), 'port': int(self.db_port.get_value()),
                  'user': self.db_user.get_text(), 'password': self.db_password.get_text()}
        if config != self.config:
            print('config changed')
            write_config(config, self.config_file_name)
            return True
        return False
| {
"content_hash": "2904e9585dd4c514322fea8ea0bf872c",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 92,
"avg_line_length": 38,
"alnum_prop": 0.5767543859649122,
"repo_name": "bond-anton/TrimMerge",
"id": "0d800b67ae0f78a0c1b188628ff4d041c9b0e08f",
"size": "4119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TrimMergeUI/PreferencesDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "88114"
}
],
"symlink_target": ""
} |
import datetime
import http
import json
import pathlib
import pytest
from gidgethub import (
BadRequest,
BadRequestUnknownError,
GitHubBroken,
HTTPException,
InvalidField,
RateLimitExceeded,
RedirectionException,
ValidationError,
ValidationFailure,
)
from gidgethub import sansio
class TestValidateEvent:
    """Tests for gidgethub.sansio.validate_event()."""

    # Shared fixture: `signature` is the "sha256="-prefixed HMAC of
    # `payload` under `secret`; `hash_signature` is the bare hex digest.
    secret = "123456"
    payload = b"gidget"
    hash_signature = "091319196718d5bcb1c20ad25fc890597423ecdbad1f947f560afd643b5000de"
    signature = "sha256=" + hash_signature

    def test_malformed_signature(self):
        """Error out if the signature doesn't start with "sha256=" or "sha1="."""
        with pytest.raises(ValidationFailure):
            sansio.validate_event(
                self.payload, secret=self.secret, signature=self.hash_signature
            )

    def test_validation(self):
        """Success case."""
        sansio.validate_event(
            self.payload, secret=self.secret, signature=self.signature
        )

    def test_failure(self):
        # A tampered payload must not validate against the old signature.
        with pytest.raises(ValidationFailure):
            sansio.validate_event(
                self.payload + b"!", secret=self.secret, signature=self.signature
            )
class TestEvent:
    """Tests for gidgethub.sansio.Event."""

    # Shared fixture data mimicking a GitHub webhook delivery.
    data = {"action": "opened"}
    data_bytes = b'{"action": "opened"}'
    secret = "123456"
    headers = {
        "content-type": "application/json",
        "x-github-event": "pull_request",
        "x-github-delivery": "72d3162e-cc78-11e3-81ab-4c9367dc0958",
        "x-hub-signature": "sha1=c28e33b2e56e548956c446e890929a6cbec3ac89",
        "x-hub-signature-256": "sha256=0340d06469a1b35662ebd7a67ea2c0c328239f319f1cfafb221451de629e0430",
    }

    def check_event(self, event):
        """Check that an event matches the test data provided by the class."""
        assert event.event == self.headers["x-github-event"]
        assert event.delivery_id == self.headers["x-github-delivery"]
        assert event.data == self.data

    def test_init(self):
        ins = sansio.Event(
            self.data,
            event=self.headers["x-github-event"],
            delivery_id=self.headers["x-github-delivery"],
        )
        self.check_event(ins)

    def test_from_http_json(self):
        """Construct an event from complete HTTP information."""
        event = sansio.Event.from_http(
            self.headers, self.data_bytes, secret=self.secret
        )
        self.check_event(event)

    def test_from_http_urlencoded(self):
        headers, body = sample("ping_urlencoded", 200)
        event = sansio.Event.from_http(headers, body)
        assert event.data["zen"] == "Keep it logically awesome."

    def test_from_http_no_content_type(self):
        """Only accept data when content-type is application/json."""
        headers_no_content_type = self.headers.copy()
        del headers_no_content_type["content-type"]
        with pytest.raises(BadRequest):
            sansio.Event.from_http(
                headers_no_content_type, self.data_bytes, secret=self.secret
            )

    def test_from_http_unknown_content_type(self):
        # Fix: the original had an accidental double assignment
        # ("headers = headers = {...}") and a stray trailing `pass`.
        headers = {
            "content-type": "image/png",
            "x-github-event": "pull_request",
            "x-github-delivery": "72d3162e-cc78-11e3-81ab-4c9367dc0958",
        }
        with pytest.raises(BadRequest):
            sansio.Event.from_http(headers, self.data_bytes)

    def test_from_http_missing_secret(self):
        """Signature but no secret raises ValidationFailure."""
        with pytest.raises(ValidationFailure):
            sansio.Event.from_http(self.headers, self.data_bytes)

    def test_from_http_missing_signature(self):
        """Secret but no signature raises ValidationFailure."""
        headers_no_sig = self.headers.copy()
        del headers_no_sig["x-hub-signature-256"]
        del headers_no_sig["x-hub-signature"]
        with pytest.raises(ValidationFailure):
            sansio.Event.from_http(headers_no_sig, self.data_bytes, secret=self.secret)

    def test_from_http_bad_signature(self):
        with pytest.raises(ValidationFailure):
            sansio.Event.from_http(
                self.headers, self.data_bytes, secret=self.secret + "no secret"
            )

    def test_from_http_sha1_signature(self):
        # Falls back to the legacy sha1 header when sha256 is absent.
        headers = self.headers.copy()
        del headers["x-hub-signature-256"]
        event = sansio.Event.from_http(headers, self.data_bytes, secret=self.secret)
        self.check_event(event)

    def test_from_http_no_signature(self):
        # No secret and no signature headers: validation is skipped.
        headers = self.headers.copy()
        del headers["x-hub-signature-256"]
        del headers["x-hub-signature"]
        event = sansio.Event.from_http(headers, self.data_bytes)
        self.check_event(event)
class TestAcceptFormat:
    """Tests for gidgethub.sansio.accept_format()."""

    def test_defaults(self):
        # No arguments -> stock v3 JSON media type.
        result = sansio.accept_format()
        assert result == "application/vnd.github.v3+json"

    def test_format(self):
        # A media override slots in before "+json".
        result = sansio.accept_format(media="raw")
        assert result == "application/vnd.github.v3.raw+json"

    def test_no_json(self):
        # json=False drops the "+json" suffix.
        result = sansio.accept_format(media="raw", json=False)
        assert result == "application/vnd.github.v3.raw"

    def test_version(self):
        # A preview version replaces the "v3" segment.
        result = sansio.accept_format(version="cloak-preview")
        assert result == "application/vnd.github.cloak-preview+json"
class TestCreateHeaders:
    """Tests for gidgethub.sansio.create_headers()."""

    def test_common_case(self):
        # user-agent + OAuth token -> "token <...>" authorization header.
        user_agent = "brettcannon"
        oauth_token = "secret"
        headers = sansio.create_headers(user_agent, oauth_token=oauth_token)
        assert headers["user-agent"] == user_agent
        assert headers["authorization"] == f"token {oauth_token}"

    def test_api_change(self):
        # An explicit accept value is passed through untouched.
        test_api = "application/vnd.github.cloak-preview+json"
        user_agent = "brettcannon"
        headers = sansio.create_headers(user_agent, accept=test_api)
        assert headers["user-agent"] == user_agent
        assert headers["accept"] == test_api

    def test_all_keys_lowercase(self):
        """Test all header fields are lowercase."""
        user_agent = "brettcannon"
        test_api = "application/vnd.github.cloak-preview+json"
        oauth_token = "secret"
        headers = sansio.create_headers(
            user_agent, accept=test_api, oauth_token=oauth_token
        )
        assert len(headers) == 3
        for key in headers.keys():
            assert key == key.lower()
        assert headers["user-agent"] == user_agent
        assert headers["accept"] == test_api
        assert headers["authorization"] == f"token {oauth_token}"

    def test_authorization_with_jwt(self):
        # A JWT produces a "bearer <...>" authorization header.
        user_agent = "brettcannon"
        jwt = "secret"
        headers = sansio.create_headers(user_agent, jwt=jwt)
        assert headers["user-agent"] == user_agent
        assert headers["authorization"] == f"bearer {jwt}"

    def test_cannot_pass_both_jwt_and_oauth(self):
        # Supplying both credential kinds is rejected with ValueError.
        user_agent = "brettcannon"
        jwt = "secret jwt"
        oauth_token = "secret oauth token"
        with pytest.raises(ValueError) as exc_info:
            sansio.create_headers(user_agent, oauth_token=oauth_token, jwt=jwt)
        assert str(exc_info.value) == "Cannot pass both oauth_token and jwt."
class TestRateLimit:
    """Tests for gidgethub.sansio.RateLimit."""

    def test_init(self):
        left = 42
        rate = 64
        reset = datetime.datetime.now(datetime.timezone.utc)
        rate_limit = sansio.RateLimit(
            remaining=left, limit=rate, reset_epoch=reset.timestamp()
        )
        assert rate_limit.remaining == left
        assert rate_limit.limit == rate
        assert rate_limit.reset_datetime == reset

    def test_bool(self):
        # Truthiness: True while requests remain OR the reset time passed.
        now = datetime.datetime.now(datetime.timezone.utc)
        year_from_now = now + datetime.timedelta(365)
        year_ago = now - datetime.timedelta(365)
        # Requests left.
        rate = sansio.RateLimit(
            remaining=1, limit=1, reset_epoch=year_from_now.timestamp()
        )
        assert rate
        # Reset passed.
        rate = sansio.RateLimit(remaining=0, limit=1, reset_epoch=year_ago.timestamp())
        assert rate
        # No requests and reset not passed.
        rate = sansio.RateLimit(
            remaining=0, limit=1, reset_epoch=year_from_now.timestamp()
        )
        assert not rate

    def test_from_http(self):
        # Parses the x-ratelimit-* response headers.
        left = 42
        rate = 65
        reset = datetime.datetime.now(datetime.timezone.utc)
        headers = {
            "x-ratelimit-limit": str(rate),
            "x-ratelimit-remaining": str(left),
            "x-ratelimit-reset": str(reset.timestamp()),
        }
        rate_limit = sansio.RateLimit.from_http(headers)
        assert rate_limit.limit == rate
        assert rate_limit.remaining == left
        assert rate_limit.reset_datetime == reset

    def test___str__(self):
        # The string form includes comma-grouped counts and the reset time.
        left = 4200
        rate = 65000
        reset = datetime.datetime.now(datetime.timezone.utc)
        message = str(
            sansio.RateLimit(limit=rate, remaining=left, reset_epoch=reset.timestamp())
        )
        assert format(left, ",") in message
        assert format(rate, ",") in message
        assert str(reset) in message

    def test_from_http_no_ratelimit(self):
        # Missing headers -> no RateLimit object at all.
        headers = {}
        rate_limit = sansio.RateLimit.from_http(headers)
        assert rate_limit is None
def sample(directory, status_code):
    """Load a recorded (headers, body) pair from the samples directory."""
    # pytest doesn't set __spec__.origin :(
    sample_dir = pathlib.Path(__file__).parent / "samples" / directory
    headers = json.loads((sample_dir / f"{status_code}.json").read_text())
    body = (sample_dir / "body").read_bytes()
    return headers, body
class TestDecipherResponse:
    """Tests for gidgethub.sansio.decipher_response()."""

    def test_5XX(self):
        # Server errors map to GitHubBroken.
        status_code = 502
        with pytest.raises(GitHubBroken) as exc_info:
            sansio.decipher_response(status_code, {}, b"")
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_4XX_no_message(self):
        # Client errors with no body still raise BadRequest.
        status_code = 400
        with pytest.raises(BadRequest) as exc_info:
            sansio.decipher_response(status_code, {}, b"")
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_4XX_message(self):
        # The JSON "message" field becomes the exception message.
        status_code = 400
        message = json.dumps({"message": "it went bad"}).encode("UTF-8")
        headers = {"content-type": "application/json; charset=utf-8"}
        with pytest.raises(BadRequest) as exc_info:
            sansio.decipher_response(status_code, headers, message)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert str(exc_info.value) == "it went bad"

    def test_404(self):
        status_code = 404
        headers, body = sample("pr_not_found", status_code)
        with pytest.raises(BadRequest) as exc_info:
            sansio.decipher_response(status_code, headers, body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert str(exc_info.value) == "Not Found"

    def test_403_rate_limit_exceeded(self):
        # 403 with zero remaining requests is a rate-limit failure.
        status_code = 403
        headers = {
            "content-type": "application/json; charset=utf-8",
            "x-ratelimit-limit": "2",
            "x-ratelimit-remaining": "0",
            "x-ratelimit-reset": "1",
        }
        body = json.dumps({"message": "oops"}).encode("UTF-8")
        with pytest.raises(RateLimitExceeded) as exc_info:
            sansio.decipher_response(status_code, headers, body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_403_forbidden(self):
        # 403 with requests remaining is a plain BadRequest, not rate limiting.
        status_code = 403
        headers = {
            "content-type": "application/json; charset=utf-8",
            "x-ratelimit-limit": "2",
            "x-ratelimit-remaining": "1",
            "x-ratelimit-reset": "1",
        }
        with pytest.raises(BadRequest) as exc_info:
            sansio.decipher_response(status_code, headers, b"")
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_422(self):
        # A named field in "errors" is appended to the message.
        status_code = 422
        errors = [{"resource": "Issue", "field": "title", "code": "missing_field"}]
        body = json.dumps({"message": "it went bad", "errors": errors})
        body = body.encode("utf-8")
        headers = {"content-type": "application/json; charset=utf-8"}
        with pytest.raises(InvalidField) as exc_info:
            sansio.decipher_response(status_code, headers, body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert str(exc_info.value) == "it went bad for 'title'"

    def test_422_custom_code(self):
        # A "custom" error code raises ValidationError with the error message.
        status_code = 422
        errors = [
            {
                "resource": "PullRequest",
                "code": "custom",
                "message": "A pull request already exists for foo:1.",
            }
        ]
        body = json.dumps({"message": "it went bad", "errors": errors})
        body = body.encode("utf-8")
        headers = {"content-type": "application/json; charset=utf-8"}
        with pytest.raises(ValidationError) as exc_info:
            sansio.decipher_response(status_code, headers, body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert (
            str(exc_info.value)
            == "it went bad: 'A pull request already exists for foo:1.'"
        )

    def test_422_no_errors_object(self):
        # A 422 without an "errors" array still raises with the message.
        status_code = 422
        body = json.dumps(
            {
                "message": "Reference does not exist",
                "documentation_url": "https://docs.github.com/en/free-pro-team@latest/rest/reference/git#delete-a-reference",
            }
        )
        body = body.encode("utf-8")
        headers = {"content-type": "application/json; charset=utf-8"}
        with pytest.raises(InvalidField) as exc_info:
            sansio.decipher_response(status_code, headers, body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert str(exc_info.value) == "Reference does not exist"

    def test_422_html_response(self):
        # https://github.com/brettcannon/gidgethub/issues/81
        # A non-JSON 422 body is preserved on the raised exception.
        status_code = 422
        body = "<html><body>Mistakes were made ...</body></html>"
        encoded_body = body.encode("utf-8")
        headers = {"content-type": "text/html; charset=utf-8"}
        with pytest.raises(BadRequestUnknownError) as exc_info:
            sansio.decipher_response(status_code, headers, encoded_body)
        assert exc_info.value.status_code == http.HTTPStatus(status_code)
        assert exc_info.value.response == body

    def test_3XX(self):
        status_code = 301
        with pytest.raises(RedirectionException) as exc_info:
            sansio.decipher_response(status_code, {}, b"")
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_2XX_error(self):
        # 2XX codes outside the accepted set still raise HTTPException.
        status_code = 205
        with pytest.raises(HTTPException) as exc_info:
            sansio.decipher_response(status_code, {}, b"")
        assert exc_info.value.status_code == http.HTTPStatus(status_code)

    def test_200(self):
        status_code = 200
        headers, body = sample("pr_single", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more is None
        assert rate_limit.remaining == 53
        assert data["url"] == "https://api.github.com/repos/python/cpython/pulls/1"

    def test_201(self):
        """Test a 201 response along with non-pagination Link header."""
        status_code = 201
        headers = {
            "x-ratelimit-limit": "60",
            "x-ratelimit-remaining": "50",
            "x-ratelimit-reset": "12345678",
            "content-type": "application/json; charset=utf-8",
            # A Link header without rel="next" must not be treated as paging.
            "link": '<http://example.com>; test="unimportant"',
        }
        data = {
            "id": 208_045_946,
            "url": "https://api.github.com/repos/octocat/Hello-World/labels/bug",
            "name": "bug",
            "color": "f29513",
            "default": True,
        }
        body = json.dumps(data).encode("UTF-8")
        returned_data, rate_limit, more = sansio.decipher_response(
            status_code, headers, body
        )
        assert more is None
        assert rate_limit.limit == 60
        assert returned_data == data

    def test_202(self):
        # https://github.com/brettcannon/gidgethub/issues/171
        status_code = 202
        headers = {
            "x-ratelimit-limit": "5000",
            "x-ratelimit-remaining": "4987",
            "x-ratelimit-reset": "1641847010",
            "content-type": "application/json; charset=utf-8",
        }
        data = {
            "id": 446_568_946,
            "name": "Hello-World",
            "fork": True,
            "forks": 0,
        }
        body = json.dumps(data).encode("UTF-8")
        returned_data, rate_limit, more = sansio.decipher_response(
            status_code, headers, body
        )
        assert more is None
        assert returned_data == data

    def test_204(self):
        """Test both a 204 response and an empty response body."""
        status_code = 204
        headers, body = sample("pr_merged", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more is None
        assert rate_limit.remaining == 41
        assert data is None

    def test_next(self):
        # Pagination: "more" carries the rel="next" URL until the last page.
        status_code = 200
        headers, body = sample("pr_page_1", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more == "https://api.github.com/repositories/4164482/pulls?page=2"
        assert rate_limit.remaining == 53
        assert data[0]["url"] == "https://api.github.com/repos/django/django/pulls/8053"
        headers, body = sample("pr_page_2", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more == "https://api.github.com/repositories/4164482/pulls?page=3"
        assert rate_limit.remaining == 50
        assert data[0]["url"] == "https://api.github.com/repos/django/django/pulls/7805"
        headers, body = sample("pr_page_last", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more is None
        assert rate_limit.remaining == 48
        assert data[0]["url"] == "https://api.github.com/repos/django/django/pulls/6395"

    # NOTE(review): asyncio marker on a synchronous test — looks like a stray
    # decorator; confirm whether pytest-asyncio is meant to collect this.
    @pytest.mark.asyncio
    def test_next_with_search_api(self):
        # The search API wraps results in a dict rather than a bare list.
        status_code = 200
        headers, body = sample("search_issues_page_1", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more == (
            "https://api.github.com/search/issues"
            "?q=repo%3Abrettcannon%2Fgidgethub+state%3Aclosed"
            "+rate+&per_page=3&page=2"
        )
        assert rate_limit.remaining == 9
        assert {"items", "incomplete_results", "total_count"} == data.keys()
        expected_first_url = (
            "https://api.github.com/repos/brettcannon/gidgethub/issues/25"
        )
        assert data["items"][0]["url"] == expected_first_url
        headers, body = sample("search_issues_page_last", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more is None
        assert rate_limit.remaining == 9
        assert {"items", "incomplete_results", "total_count"} == data.keys()
        expected_first_url = (
            "https://api.github.com/repos/brettcannon/gidgethub/issues/10"
        )
        assert data["items"][0]["url"] == expected_first_url

    def test_text_body(self):
        """Test requesting non-JSON data like a diff."""
        status_code = 200
        headers, body = sample("pr_diff", status_code)
        data, rate_limit, more = sansio.decipher_response(status_code, headers, body)
        assert more is None
        assert rate_limit.remaining == 43
        assert data.startswith("diff --git")

    def test_no_ratelimit(self):
        """Test no ratelimit in headers."""
        status_code = 201
        headers = {
            "content-type": "application/json; charset=utf-8",
            "link": '<http://example.com>; test="unimportant"',
        }
        data = {
            "id": 208_045_946,
            "url": "https://api.github.com/repos/octocat/Hello-World/labels/bug",
            "name": "bug",
            "color": "f29513",
            "default": True,
        }
        body = json.dumps(data).encode("UTF-8")
        returned_data, rate_limit, more = sansio.decipher_response(
            status_code, headers, body
        )
        assert more is None
        assert rate_limit is None
        assert returned_data == data
class TestFormatUrl:
    """Tests for gidgethub.sansio.format_url()."""

    @pytest.mark.parametrize(
        "base_url",
        ["https://api.github.com/notifications", "https://my.host.com/notifications"],
    )
    def test_absolute_url(self, base_url):
        # An already-absolute URL is returned unchanged.
        url = sansio.format_url(base_url, {}, base_url=base_url)
        assert url == base_url

    def test_different_base_and_absolute_url(self):
        # An absolute URL wins over a differing base_url.
        url = sansio.format_url(
            "https://api.github.com/notifications",
            {},
            base_url="https://my.host.com/notifications",
        )
        assert url == "https://api.github.com/notifications"

    @pytest.mark.parametrize(
        "base_url", ["https://api.github.com", "https://my.host.com"]
    )
    def test_relative_url(self, base_url):
        # A relative path is joined onto base_url.
        url = sansio.format_url("/notifications", {}, base_url=base_url)
        assert url == f"{base_url}/notifications"

    def test_template(self):
        # URI templates (RFC 6570 style) are expanded with the given vars.
        template_url = "https://api.github.com/users/octocat/gists{/gist_id}"
        template_data = {"gist_id": "1234"}
        # Substituting an absolute URL.
        url = sansio.format_url(template_url, template_data)
        assert url == "https://api.github.com/users/octocat/gists/1234"
        # No substituting an absolute URL.
        url = sansio.format_url(template_url, {})
        assert url == "https://api.github.com/users/octocat/gists"
        # Substituting a relative URL.
        url = sansio.format_url("/users/octocat/gists{/gist_id}", template_data)
        assert url == "https://api.github.com/users/octocat/gists/1234"

    def test_template_with_base_url(self):
        template_url = "https://my.host.com/users/octocat/gists{/gist_id}"
        template_data = {"gist_id": "1234"}
        # Substituting an absolute URL.
        url = sansio.format_url(
            template_url, template_data, base_url="https://my.host.com"
        )
        assert url == "https://my.host.com/users/octocat/gists/1234"
        # No substituting an absolute URL.
        url = sansio.format_url(template_url, {}, base_url="https://my.host.com")
        assert url == "https://my.host.com/users/octocat/gists"
        # Substituting a relative URL.
        url = sansio.format_url(
            "/users/octocat/gists{/gist_id}",
            template_data,
            base_url="https://my.host.com",
        )
        assert url == "https://my.host.com/users/octocat/gists/1234"

    @pytest.mark.parametrize(
        "base_url", ["https://api.github.com", "https://my.host.com"]
    )
    def test_quoting(self, base_url):
        # Template values are percent-encoded during expansion.
        template_url = "https://api.github.com/repos/python/cpython/labels{/name}"
        label = {"name": "CLA signed"}
        url = sansio.format_url(template_url, label, base_url=base_url)
        assert url == "https://api.github.com/repos/python/cpython/labels/CLA%20signed"
| {
"content_hash": "7394911406d3810fe569cd90b512544b",
"timestamp": "",
"source": "github",
"line_count": 621,
"max_line_length": 125,
"avg_line_length": 38.44927536231884,
"alnum_prop": 0.6043053985006491,
"repo_name": "brettcannon/gidgethub",
"id": "c97d63f52ecd6ee876ae00c31ac6de136a712e19",
"size": "23877",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_sansio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122602"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
import os.path as op
from copy import deepcopy
from functools import partial
import pytest
import numpy as np
from scipy.io import savemat
from numpy.testing import assert_array_equal, assert_equal, assert_allclose
from mne.channels import (rename_channels, read_ch_adjacency, combine_channels,
find_ch_adjacency, make_1020_channel_selections,
read_custom_montage, equalize_channels,
make_standard_montage)
from mne.channels.channels import (_ch_neighbor_adjacency,
_compute_ch_adjacency)
from mne.io import (read_info, read_raw_fif, read_raw_ctf, read_raw_bti,
read_raw_eeglab, read_raw_kit, RawArray)
from mne.io.constants import FIFF
from mne import (pick_types, pick_channels, EpochsArray, EvokedArray,
make_ad_hoc_cov, create_info, read_events, Epochs)
from mne.datasets import testing
# Locations of the small test data files that ship inside mne.io.
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
base_dir = op.join(io_dir, 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
eve_fname = op.join(base_dir, 'test-eve.fif')
fname_kit_157 = op.join(io_dir, 'kit', 'tests', 'data', 'test.sqd')
@pytest.mark.parametrize('preload', (True, False))
@pytest.mark.parametrize('proj', (True, False))
def test_reorder_channels(preload, proj):
    """Test reordering of channels."""
    raw = read_raw_fif(raw_fname).crop(0, 0.1).del_proj()
    if proj:  # a no-op but should test it
        raw._projector = np.eye(len(raw.ch_names))
    if preload:
        raw.load_data()
    # with .reorder_channels
    if proj and not preload:
        # Reordering with a projector requires loaded data.
        with pytest.raises(RuntimeError, match='load data'):
            raw.copy().reorder_channels(raw.ch_names[::-1])
        return
    raw_new = raw.copy().reorder_channels(raw.ch_names[::-1])
    assert raw_new.ch_names == raw.ch_names[::-1]
    if proj:
        assert_allclose(raw_new._projector, raw._projector, atol=1e-12)
    else:
        assert raw._projector is None
        assert raw_new._projector is None
    # Data rows must have been reversed along with the names.
    assert_array_equal(raw[:][0], raw_new[:][0][::-1])
    # Dropping first/last channel two different ways must agree.
    raw_new.reorder_channels(raw_new.ch_names[::-1][1:-1])
    raw.drop_channels(raw.ch_names[:1] + raw.ch_names[-1:])
    assert_array_equal(raw[:][0], raw_new[:][0])
    with pytest.raises(ValueError, match='repeated'):
        raw.reorder_channels(raw.ch_names[:1] + raw.ch_names[:1])
    # and with .pick
    reord = [1, 0] + list(range(2, len(raw.ch_names)))
    rev = np.argsort(reord)
    raw_new = raw.copy().pick(reord)
    assert_array_equal(raw[:][0], raw_new[rev][0])
def test_rename_channels():
    """Test rename channels.

    Covers the error cases (unknown channel, non-string target, duplicate
    target, invalid mapping type) and the successful dict / callable paths,
    including propagation to ``info['bads']`` and ``Raw._orig_units``.
    """
    info = read_info(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    pytest.raises(ValueError, rename_channels, info, mapping)
    # Test improper mapping configuration
    mapping = {'MEG 2641': 1.0}
    pytest.raises(TypeError, rename_channels, info, mapping)
    # Test non-unique mapping configuration
    mapping = {'MEG 2641': 'MEG 2642'}
    pytest.raises(ValueError, rename_channels, info, mapping)
    # Test bad input (must be a dict or callable); the original repeated this
    # exact statement twice — the redundant duplicate has been removed.
    pytest.raises(ValueError, rename_channels, info, 1.)
    # Test successful changes
    # Test ch_name and ch_names are changed
    info2 = deepcopy(info)  # for consistency at the start of each test
    info2['bads'] = ['EEG 060', 'EOG 061']
    mapping = {'EEG 060': 'EEG060', 'EOG 061': 'EOG061'}
    rename_channels(info2, mapping)
    assert info2['chs'][374]['ch_name'] == 'EEG060'
    assert info2['ch_names'][374] == 'EEG060'
    assert info2['chs'][375]['ch_name'] == 'EOG061'
    assert info2['ch_names'][375] == 'EOG061'
    # Bad-channel names must follow the rename.
    assert_array_equal(['EEG060', 'EOG061'], info2['bads'])
    # A callable mapping is applied to every channel name.
    info2 = deepcopy(info)
    rename_channels(info2, lambda x: x.replace(' ', ''))
    assert info2['chs'][373]['ch_name'] == 'EEG059'
    # Duplicate entries in bads are each renamed and kept.
    info2 = deepcopy(info)
    info2['bads'] = ['EEG 060', 'EEG 060']
    rename_channels(info2, mapping)
    assert_array_equal(['EEG060', 'EEG060'], info2['bads'])
    # test that keys in Raw._orig_units will be renamed, too
    raw = read_raw_fif(raw_fname).crop(0, 0.1)
    old, new = 'EEG 060', 'New'
    raw._orig_units = {old: 'V'}
    raw.rename_channels({old: new})
    assert old not in raw._orig_units
    assert new in raw._orig_units
def test_set_channel_types():
    """Test set_channel_types."""
    raw = read_raw_fif(raw_fname)
    # Error Tests
    # Test channel name exists in ch_names
    mapping = {'EEG 160': 'EEG060'}
    with pytest.raises(ValueError, match=r"name \(EEG 160\) doesn't exist"):
        raw.set_channel_types(mapping)
    # Test change to illegal channel type
    mapping = {'EOG 061': 'xxx'}
    with pytest.raises(ValueError, match='cannot change to this channel type'):
        raw.set_channel_types(mapping)
    # Test changing type if in proj
    mapping = {'EEG 057': 'dbs', 'EEG 058': 'ecog', 'EEG 059': 'ecg',
               'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg',
               'MEG 2443': 'eeg', 'MEG 2442': 'hbo', 'EEG 001': 'resp'}
    raw2 = read_raw_fif(raw_fname)
    raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061']
    # Changing the type of a channel referenced by a projector must fail
    # until the projectors are removed.
    with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'):
        raw2.set_channel_types(mapping)  # has prj
    raw2.add_proj([], remove_existing=True)
    with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'):
        raw2 = raw2.set_channel_types(mapping)
    info = raw2.info
    # Verify kind/unit/coil_type for each remapped channel by index.
    assert info['chs'][371]['ch_name'] == 'EEG 057'
    assert info['chs'][371]['kind'] == FIFF.FIFFV_DBS_CH
    assert info['chs'][371]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][371]['coil_type'] == FIFF.FIFFV_COIL_EEG
    assert info['chs'][372]['ch_name'] == 'EEG 058'
    assert info['chs'][372]['kind'] == FIFF.FIFFV_ECOG_CH
    assert info['chs'][372]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][372]['coil_type'] == FIFF.FIFFV_COIL_EEG
    assert info['chs'][373]['ch_name'] == 'EEG 059'
    assert info['chs'][373]['kind'] == FIFF.FIFFV_ECG_CH
    assert info['chs'][373]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][373]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][374]['ch_name'] == 'EEG 060'
    assert info['chs'][374]['kind'] == FIFF.FIFFV_EOG_CH
    assert info['chs'][374]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][374]['coil_type'] == FIFF.FIFFV_COIL_NONE
    assert info['chs'][375]['ch_name'] == 'EOG 061'
    assert info['chs'][375]['kind'] == FIFF.FIFFV_SEEG_CH
    assert info['chs'][375]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][375]['coil_type'] == FIFF.FIFFV_COIL_EEG
    for idx in pick_channels(raw.ch_names, ['MEG 2441', 'MEG 2443']):
        assert info['chs'][idx]['kind'] == FIFF.FIFFV_EEG_CH
        assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V
        assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_EEG
    idx = pick_channels(raw.ch_names, ['MEG 2442'])[0]
    assert info['chs'][idx]['kind'] == FIFF.FIFFV_FNIRS_CH
    assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_MOL
    assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO
    # resp channel type
    idx = pick_channels(raw.ch_names, ['EEG 001'])[0]
    assert info['chs'][idx]['kind'] == FIFF.FIFFV_RESP_CH
    assert info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_V
    assert info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_NONE
    # Test meaningful error when setting channel type with unknown unit
    raw.info['chs'][0]['unit'] = 0.
    ch_types = {raw.ch_names[0]: 'misc'}
    pytest.raises(ValueError, raw.set_channel_types, ch_types)
def test_read_ch_adjacency(tmpdir):
    """Test reading channel adjacency templates."""
    tempdir = str(tmpdir)
    a = partial(np.array, dtype='<U7')
    # no pep8
    # Build a minimal FieldTrip-style "neighbours" struct and save it as a
    # MATLAB file, the format read_ch_adjacency() expects.
    nbh = np.array([[(['MEG0111'], [[a(['MEG0131'])]]),
                     (['MEG0121'], [[a(['MEG0111'])],
                                    [a(['MEG0131'])]]),
                     (['MEG0131'], [[a(['MEG0111'])],
                                    [a(['MEG0121'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname)
    x = ch_adjacency
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (3, 3))
    assert_equal(x[0, 1], False)
    assert_equal(x[0, 2], True)
    # Every channel is adjacent to itself.
    assert np.all(x.diagonal())
    # An out-of-range pick must raise.
    pytest.raises(ValueError, read_ch_adjacency, mat_fname, [0, 3])
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname, picks=[0, 2])
    assert_equal(ch_adjacency.shape[0], 2)
    assert_equal(len(ch_names), 2)
    # Inconsistent neighbour definitions must raise.
    ch_names = ['EEG01', 'EEG02', 'EEG03']
    neighbors = [['EEG02'], ['EEG04'], ['EEG02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names, neighbors)
    neighbors = [['EEG02'], ['EEG01', 'EEG03'], ['EEG 02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names[:2],
                  neighbors)
    neighbors = [['EEG02'], 'EEG01', ['EEG 02']]
    pytest.raises(ValueError, _ch_neighbor_adjacency, ch_names, neighbors)
    # A built-in template can be loaded by name.
    adjacency, ch_names = read_ch_adjacency('neuromag306mag')
    assert_equal(adjacency.shape, (102, 102))
    assert_equal(len(ch_names), 102)
    pytest.raises(ValueError, read_ch_adjacency, 'bananas!')
    # In EGI 256, E31 sensor has no neighbour
    a = partial(np.array)
    nbh = np.array([[(['E31'], []),
                     (['E1'], [[a(['E2'])],
                               [a(['E3'])]]),
                     (['E2'], [[a(['E1'])],
                               [a(['E3'])]]),
                     (['E3'], [[a(['E1'])],
                               [a(['E2'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_isolated_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    ch_adjacency, ch_names = read_ch_adjacency(mat_fname)
    x = ch_adjacency.todense()
    assert_equal(x.shape[0], len(ch_names))
    assert_equal(x.shape, (4, 4))
    assert np.all(x.diagonal())
    # The isolated sensor must not be adjacent to anything else.
    assert not np.any(x[0, 1:])
    assert not np.any(x[1:, 0])
    # Check for neighbours consistency. If a sensor is marked as a neighbour,
    # then it should also have its neighbours defined.
    a = partial(np.array)
    nbh = np.array([[(['E31'], []),
                     (['E1'], [[a(['E8'])],
                               [a(['E3'])]]),
                     (['E2'], [[a(['E1'])],
                               [a(['E3'])]]),
                     (['E3'], [[a(['E1'])],
                               [a(['E2'])]])]],
                   dtype=[('label', 'O'), ('neighblabel', 'O')])
    mat = dict(neighbours=nbh)
    mat_fname = op.join(tempdir, 'test_error_mat.mat')
    savemat(mat_fname, mat, oned_as='row')
    pytest.raises(ValueError, read_ch_adjacency, mat_fname)
def test_get_set_sensor_positions():
    """Test get/set functions for sensor positions."""
    raw1 = read_raw_fif(raw_fname)
    picks = pick_types(raw1.info, meg=False, eeg=True)
    # Positions are the first three entries of each channel's 'loc'.
    pos = np.array([ch['loc'][:3] for ch in raw1.info['chs']])[picks]
    raw_pos = raw1._get_channel_positions(picks=picks)
    assert_array_equal(raw_pos, pos)
    ch_name = raw1.info['ch_names'][13]
    # Mismatched positions/names lengths must raise.
    pytest.raises(ValueError, raw1._set_channel_positions, [1, 2], ['name'])
    raw2 = read_raw_fif(raw_fname)
    raw2.info['chs'][13]['loc'][:3] = np.array([1, 2, 3])
    # Setting via the helper must match direct modification of info.
    raw1._set_channel_positions([[1, 2, 3]], [ch_name])
    assert_array_equal(raw1.info['chs'][13]['loc'],
                       raw2.info['chs'][13]['loc'])
@testing.requires_testing_data
def test_1020_selection():
    """Test making a 10/20 selection dict."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    loc_fname = op.join(base_dir, 'test_chans.locs')
    raw = read_raw_eeglab(raw_fname, preload=True)
    montage = read_custom_montage(loc_fname)
    raw = raw.rename_channels(dict(zip(raw.ch_names, montage.ch_names)))
    raw.set_montage(montage)
    # Only an Info instance is accepted.
    for input in ("a_string", 100, raw, [1, 2]):
        pytest.raises(TypeError, make_1020_channel_selections, input)
    sels = make_1020_channel_selections(raw.info)
    # are all frontal channels placed before all occipital channels?
    # NOTE(review): the comment and the assertion below disagree — fs > ps
    # means the first frontal index comes *after* the last occipital one;
    # confirm which ordering make_1020_channel_selections guarantees.
    for name, picks in sels.items():
        fs = min([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("F")])
        ps = max([ii for ii, pick in enumerate(picks)
                  if raw.ch_names[pick].startswith("O")])
        assert fs > ps
    # are channels in the correct selection?
    fz_c3_c4 = [raw.ch_names.index(ch) for ch in ("Fz", "C3", "C4")]
    for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")):
        assert channel in sels[roi]
@testing.requires_testing_data
def test_find_ch_adjacency():
    """Test computing the adjacency matrix."""
    data_path = testing.data_path()
    raw = read_raw_fif(raw_fname, preload=True)
    # Expected non-zero counts / channel counts per channel type.
    sizes = {'mag': 828, 'grad': 1700, 'eeg': 384}
    nchans = {'mag': 102, 'grad': 204, 'eeg': 60}
    for ch_type in ['mag', 'grad', 'eeg']:
        conn, ch_names = find_ch_adjacency(raw.info, ch_type)
        # Silly test for checking the number of neighbors.
        assert_equal(conn.getnnz(), sizes[ch_type])
        assert_equal(len(ch_names), nchans[ch_type])
    # ch_type=None is rejected when multiple channel types are present.
    pytest.raises(ValueError, find_ch_adjacency, raw.info, None)
    # Test computing the conn matrix with gradiometers.
    conn, ch_names = _compute_ch_adjacency(raw.info, 'grad')
    assert_equal(conn.getnnz(), 2680)
    # Test ch_type=None.
    raw.pick_types(meg='mag')
    find_ch_adjacency(raw.info, None)
    # BTi data.
    bti_fname = op.join(data_path, 'BTi', 'erm_HFH', 'c,rfDC')
    bti_config_name = op.join(data_path, 'BTi', 'erm_HFH', 'config')
    raw = read_raw_bti(bti_fname, bti_config_name, None)
    _, ch_names = find_ch_adjacency(raw.info, 'mag')
    assert 'A1' in ch_names
    # CTF data.
    ctf_fname = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
    raw = read_raw_ctf(ctf_fname)
    _, ch_names = find_ch_adjacency(raw.info, 'mag')
    assert 'MLC11' in ch_names
    pytest.raises(ValueError, find_ch_adjacency, raw.info, 'eog')
    # KIT data.
    raw_kit = read_raw_kit(fname_kit_157)
    neighb, ch_names = find_ch_adjacency(raw_kit.info, 'mag')
    assert neighb.data.size == 1329
    assert ch_names[0] == 'MEG 001'
@testing.requires_testing_data
def test_neuromag122_adjacency():
    """Test computing the adjacency matrix of Neuromag122-Data."""
    fname = op.join(testing.data_path(), 'misc',
                    'neuromag122_test_file-raw.fif')
    raw = read_raw_fif(fname, preload=True)
    adjacency, names = find_ch_adjacency(raw.info, 'grad')
    # 122 gradiometers with a fixed, known number of neighbour links.
    assert adjacency.shape == (122, 122)
    assert adjacency.getnnz() == 1564
    assert len(names) == 122
def test_drop_channels():
    """Test if dropping channels works with various arguments."""
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1)
    # Every supported container type for the channel argument.
    raw.drop_channels(["MEG 0111"])  # list
    raw.drop_channels("MEG 0112")  # single str
    raw.drop_channels({"MEG 0132", "MEG 0133"})  # set
    # Invalid arguments are rejected.
    with pytest.raises(ValueError):
        raw.drop_channels(["MEG 0111", 5])
    with pytest.raises(ValueError):
        raw.drop_channels(5)  # must be list or str
def test_pick_channels():
    """Test if picking channels works with various arguments."""
    raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1)
    # selected correctly 3 channels
    raw.pick(['MEG 0113', 'MEG 0112', 'MEG 0111'])
    assert len(raw.ch_names) == 3
    # An unknown entry such as 'meg' is ignored with a warning.
    with pytest.warns(RuntimeWarning, match='not present in the info'):
        raw.pick(['MEG 0113', "meg", 'MEG 0112', 'MEG 0111'])
    assert len(raw.ch_names) == 3
    n_channels = len(raw.ch_names)
    # 'all' keeps everything, whether given as a list or a bare string.
    for selector in (['all'], 'all'):
        raw.pick(selector)
        assert len(raw.ch_names) == n_channels
def test_add_reference_channels():
    """Test if there is a new reference channel that consist of all zeros."""
    raw = read_raw_fif(raw_fname, preload=True)
    n_raw_original_channels = len(raw.ch_names)
    epochs = Epochs(raw, read_events(eve_fname))
    epochs.load_data()
    epochs_original_shape = epochs._data.shape[1]
    evoked = epochs.average()
    n_evoked_original_channels = len(evoked.ch_names)
    # Raw object: one extra channel, filled with zeros.
    raw.add_reference_channels(['REF 123'])
    assert len(raw.ch_names) == n_raw_original_channels + 1
    assert np.all(raw.get_data()[-1] == 0)
    # Epochs object
    epochs.add_reference_channels(['REF 123'])
    assert epochs._data.shape[1] == epochs_original_shape + 1
    # Evoked object
    evoked.add_reference_channels(['REF 123'])
    assert len(evoked.ch_names) == n_evoked_original_channels + 1
    assert np.all(evoked._data[-1] == 0)
def test_equalize_channels():
    """Test equalizing channels and their ordering."""
    # This function only tests the generic functionality of equalize_channels.
    # Additional tests for each instance type are included in the accompanying
    # test suite for each type.
    pytest.raises(TypeError, equalize_channels, ['foo', 'bar'],
                  match='Instances to be modified must be an instance of')
    # Four instance types with overlapping but differing channel sets.
    raw = RawArray([[1.], [2.], [3.], [4.]],
                   create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.))
    epochs = EpochsArray([[[1.], [2.], [3.]]],
                         create_info(['CH5', 'CH2', 'CH1'], sfreq=1.))
    cov = make_ad_hoc_cov(create_info(['CH2', 'CH1', 'CH8'], sfreq=1.,
                                      ch_types='eeg'))
    cov['bads'] = ['CH1']
    ave = EvokedArray([[1.], [2.]], create_info(['CH1', 'CH2'], sfreq=1.))
    raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave],
                                                  copy=True)
    # The Raw object was the first in the list, so should have been used as
    # template for the ordering of the channels. No bad channels should have
    # been dropped.
    assert raw2.ch_names == ['CH1', 'CH2']
    assert_array_equal(raw2.get_data(), [[1.], [2.]])
    assert epochs2.ch_names == ['CH1', 'CH2']
    assert_array_equal(epochs2.get_data(), [[[3.], [2.]]])
    assert cov2.ch_names == ['CH1', 'CH2']
    assert cov2['bads'] == cov['bads']
    assert ave2.ch_names == ave.ch_names
    assert_array_equal(ave2.data, ave.data)
    # All objects should have been copied, except for the Evoked object which
    # did not have to be touched.
    assert raw is not raw2
    assert epochs is not epochs2
    assert cov is not cov2
    assert ave is ave2
    # Test in-place operation
    raw2, epochs2 = equalize_channels([raw, epochs], copy=False)
    assert raw is raw2
    assert epochs is epochs2
def test_combine_channels():
    """Test channel combination on Raw, Epochs, and Evoked."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw_ch_bad = read_raw_fif(raw_fname, preload=True)
    raw_ch_bad.info['bads'] = ['MEG 0113', 'MEG 0112']
    epochs = Epochs(raw, read_events(eve_fname))
    evoked = epochs.average()
    good = dict(foo=[0, 1, 3, 4], bar=[5, 2])  # good grad and mag
    # Test good cases
    combine_channels(raw, good)
    combined_epochs = combine_channels(epochs, good)
    # Event metadata must survive the combination.
    assert_array_equal(combined_epochs.events, epochs.events)
    combine_channels(evoked, good)
    combine_channels(raw, good, drop_bad=True)
    combine_channels(raw_ch_bad, good, drop_bad=True)
    # Test with stimulus channels
    combine_stim = combine_channels(raw, good, keep_stim=True)
    target_nchan = len(good) + len(pick_types(raw.info, meg=False, stim=True))
    assert combine_stim.info['nchan'] == target_nchan
    # Test results with one ROI: each method must equal the NumPy reference.
    good_single = dict(foo=[0, 1, 3, 4])  # good grad
    combined_mean = combine_channels(raw, good_single, method='mean')
    combined_median = combine_channels(raw, good_single, method='median')
    combined_std = combine_channels(raw, good_single, method='std')
    foo_mean = np.mean(raw.get_data()[good_single['foo']], axis=0)
    foo_median = np.median(raw.get_data()[good_single['foo']], axis=0)
    foo_std = np.std(raw.get_data()[good_single['foo']], axis=0)
    assert_array_equal(combined_mean.get_data(),
                       np.expand_dims(foo_mean, axis=0))
    assert_array_equal(combined_median.get_data(),
                       np.expand_dims(foo_median, axis=0))
    assert_array_equal(combined_std.get_data(),
                       np.expand_dims(foo_std, axis=0))
    # Test bad cases
    bad1 = dict(foo=[0, 376], bar=[5, 2])  # out of bounds
    bad2 = dict(foo=[0, 2], bar=[5, 2])  # type mix in same group
    with pytest.raises(ValueError, match='"method" must be a callable, or'):
        combine_channels(raw, good, method='bad_method')
    with pytest.raises(TypeError, match='"keep_stim" must be of type bool'):
        combine_channels(raw, good, keep_stim='bad_type')
    with pytest.raises(TypeError, match='"drop_bad" must be of type bool'):
        combine_channels(raw, good, drop_bad='bad_type')
    with pytest.raises(ValueError, match='Some channel indices are out of'):
        combine_channels(raw, bad1)
    with pytest.raises(ValueError, match='Cannot combine sensors of diff'):
        combine_channels(raw, bad2)
    # Test warnings
    raw_no_stim = read_raw_fif(raw_fname, preload=True)
    raw_no_stim.pick_types(meg=True, stim=False)
    warn1 = dict(foo=[375, 375], bar=[5, 2])  # same channel in same group
    warn2 = dict(foo=[375], bar=[5, 2])  # one channel (last channel)
    warn3 = dict(foo=[0, 4], bar=[5, 2])  # one good channel left
    with pytest.warns(RuntimeWarning, match='Could not find stimulus'):
        combine_channels(raw_no_stim, good, keep_stim=True)
    # The three degenerate groupings above each emit one warning.
    with pytest.warns(RuntimeWarning, match='Less than 2 channels') as record:
        combine_channels(raw, warn1)
        combine_channels(raw, warn2)
        combine_channels(raw_ch_bad, warn3, drop_bad=True)
    assert len(record) == 3
def test_get_montage():
    """Test ContainsMixin.get_montage()."""
    montage_names = make_standard_montage('standard_1020').ch_names
    n_ch = len(montage_names)
    sfreq = 512
    raw = RawArray(np.zeros((n_ch, sfreq * 2)),
                   create_info(montage_names, sfreq, 'eeg'))
    raw.set_montage('standard_1020')
    assert len(raw.get_montage().ch_names) == n_ch
    # marking a channel bad must not drop it from the returned montage
    raw.info['bads'] = [montage_names[0]]
    assert len(raw.get_montage().ch_names) == n_ch
| {
"content_hash": "fd6e9290972fc7c7f3143f3e74c9e9bf",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 79,
"avg_line_length": 42.83584905660377,
"alnum_prop": 0.6084217944765009,
"repo_name": "bloyl/mne-python",
"id": "2543e2e1a80cb428e96cecfbc19f7c8875f633b6",
"size": "22843",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "mne/channels/tests/test_channels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "8190297"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from HBaseHandler.HbaseHandler import *
import tornado.autoreload
import tornado.httpserver
import tornado.ioloop
import tornado.locale
import tornado.web
import os
import sys
reload(sys)  # Python 2 only: restore sys.setdefaultencoding, which site.py deletes
sys.setdefaultencoding('utf-8')  # HACK: process-wide default-encoding override (Py2 idiom)
# Tornado application settings.
settings = \
{
    'cookie_secret': 'hbasehbasehbasepythonpythonthriftthrift!!!',
    'xsrf_cookies': True,
    'gzip': False,
    'debug': True,
    'xheaders': True,
}
# URL routing: one handler per HBase operation; handler classes come from
# the star import of HBaseHandler.HbaseHandler at the top of the file.
application = tornado.web.Application([
    (r'/HBase/Create', CreateTableHandler),
    (r'/HBase/TableList', ListAllTableHandler),
    (r'/HBase/Disable', DisableTableHandler),
    (r'/HBase/Enable', EnableTableHandler),
    (r'/HBase/DropTable', DropTableHandler),
    (r'/HBase/TableRegion', GetTableRegionsInfoHandler),
    (r'/HBase/ColDesc', GetColumnDescriptorsHandler),
    (r'/HBase/Compact', CompactHandler),
    (r'/HBase/delete', DeleteHandler),
    (r'/HBase/Mutate', MutateHandler),
    (r'/HBase/MutateRows', MutateRowsHandler),
    (r'/HBase/AllVersion', GetAllVersionHandler),
    (r'/HBase/Scan', ScanTableHandler),
    (r'/HBase/RowsWithCol', GetRowsWithColumnHandler),
], **settings)
if __name__ == '__main__':
    server = tornado.httpserver.HTTPServer(application)
    server.listen(20004)  # NOTE(review): port is hard-coded; confirm deployment expects 20004
    loop = tornado.ioloop.IOLoop.instance()
    tornado.autoreload.start(loop)  # restart the server when a source file changes
    loop.start()
| {
"content_hash": "ee911d19534b70552b19abd5dedc75fa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 70,
"avg_line_length": 29.57777777777778,
"alnum_prop": 0.69045830202855,
"repo_name": "yangdyi/hbase_api",
"id": "ce500b9b204f1eed5fd500eeba464fc9e471297f",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HBase_python/HBase_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28273"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Java",
"bytes": "33259"
},
{
"name": "Python",
"bytes": "1697329"
},
{
"name": "Thrift",
"bytes": "23700"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestDropoutOp(OpTest):
    """Baseline dropout test: prob=0.0 in training mode keeps X intact."""

    def setUp(self):
        self.op_type = "dropout"
        data = np.random.random((32, 64)).astype("float32")
        self.inputs = {'X': data}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        # With nothing dropped the output equals the input and the mask is all ones.
        self.outputs = {
            'Out': data,
            'Mask': np.ones((32, 64)).astype('uint8'),
        }

    def test_check_output(self):
        """Forward result must match the reference outputs."""
        self.check_output()

    def test_check_grad_normal(self):
        """Gradient of Out w.r.t. X within 5% relative error."""
        self.check_grad(['X'], 'Out', max_relative_error=0.05)
class TestDropoutOp2(TestDropoutOp):
    """prob=1.0: every element is dropped, so output and mask are all zero."""

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {'dropout_prob': 1.0, 'fix_seed': True, 'is_test': False}
        zeros = np.zeros((32, 64))
        self.outputs = {
            'Out': zeros.astype('float32'),
            'Mask': zeros.astype('uint8'),
        }
class TestDropoutOp3(TestDropoutOp):
    """Same as the baseline but with a 3-D input tensor."""

    def setUp(self):
        self.op_type = "dropout"
        data = np.random.random((32, 64, 2)).astype("float32")
        self.inputs = {'X': data}
        self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
        self.outputs = {
            'Out': data,
            'Mask': np.ones((32, 64, 2)).astype('uint8'),
        }
class TestDropoutOp4(OpTest):
    """Inference mode (is_test=True): Out = X * (1 - prob); no mask output."""

    def setUp(self):
        self.op_type = "dropout"
        prob = 0.35
        x = np.random.random((32, 64)).astype("float32")
        self.inputs = {'X': x}
        self.attrs = {'dropout_prob': prob, 'fix_seed': True, 'is_test': True}
        self.outputs = {'Out': x * (1.0 - prob)}

    def test_check_output(self):
        self.check_output()
class TestDropoutOp5(OpTest):
    """Inference mode on a 3-D input without a fixed seed."""

    def setUp(self):
        self.op_type = "dropout"
        prob = 0.75
        x = np.random.random((32, 64, 3)).astype("float32")
        self.inputs = {'X': x}
        self.attrs = {'dropout_prob': prob, 'is_test': True}
        self.outputs = {'Out': x * (1.0 - prob)}

    def test_check_output(self):
        self.check_output()
class TestDropoutOp6(TestDropoutOp):
    """upscale_in_train with prob=1.0: all dropped, zero output and mask."""

    def setUp(self):
        self.op_type = "dropout"
        self.inputs = {'X': np.random.random((32, 64)).astype("float32")}
        self.attrs = {
            'dropout_prob': 1.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train',
        }
        zeros = np.zeros((32, 64))
        self.outputs = {
            'Out': zeros.astype('float32'),
            'Mask': zeros.astype('uint8'),
        }
class TestDropoutOp7(TestDropoutOp):
    """upscale_in_train with prob=0.0 on a 3-D input: pass-through."""

    def setUp(self):
        self.op_type = "dropout"
        data = np.random.random((32, 64, 2)).astype("float32")
        self.inputs = {'X': data}
        self.attrs = {
            'dropout_prob': 0.0,
            'fix_seed': True,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train',
        }
        self.outputs = {
            'Out': data,
            'Mask': np.ones((32, 64, 2)).astype('uint8'),
        }
class TestDropoutOp8(OpTest):
    """upscale_in_train at inference time: output passes through unscaled."""

    def setUp(self):
        self.op_type = "dropout"
        x = np.random.random((32, 64)).astype("float32")
        self.inputs = {'X': x}
        self.attrs = {
            'dropout_prob': 0.35,
            'fix_seed': True,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train',
        }
        self.outputs = {'Out': x}

    def test_check_output(self):
        self.check_output()
class TestDropoutOp9(OpTest):
    """upscale_in_train at inference on a 3-D input, no fixed seed."""

    def setUp(self):
        self.op_type = "dropout"
        x = np.random.random((32, 64, 3)).astype("float32")
        self.inputs = {'X': x}
        self.attrs = {
            'dropout_prob': 0.75,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train',
        }
        self.outputs = {'Out': x}

    def test_check_output(self):
        self.check_output()
class TestFP16DropoutOp(OpTest):
    """float16 dropout in inference mode, checked only when CUDA is available."""

    def setUp(self):
        self.op_type = "dropout"
        self.init_test_case()
        x = np.random.random(self.input_size).astype("float16")
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {
            'dropout_prob': self.prob,
            'fix_seed': self.fix_seed,
            'is_test': True,
        }
        # Inference scales by (1 - prob) instead of masking.
        self.outputs = {'Out': x * (1.0 - self.prob)}

    def init_test_case(self):
        """Hook overridden by subclasses to vary shape/prob/seed."""
        self.input_size = [32, 64]
        self.prob = 0.35
        self.fix_seed = True

    def test_check_output(self):
        # float16 dropout only runs on the GPU kernel; loose tolerance.
        if core.is_compiled_with_cuda() and core.op_support_gpu("dropout"):
            self.check_output_with_place(core.CUDAPlace(0), atol=1e-3)
class TestFP16DropoutOp2(TestFP16DropoutOp):
    """float16 variant: 3-D input, higher drop probability, no fixed seed."""
    def init_test_case(self):
        self.input_size = [32, 64, 3]
        self.prob = 0.75
        self.fix_seed = False
class TestDropoutOpError(OpTest):
    """Input validation: dropout rejects non-Variable and integer inputs."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            def check_variable():
                # the input of dropout must be Variable.
                bad_input = fluid.create_lod_tensor(
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
                fluid.layers.dropout(bad_input, dropout_prob=0.5)
            self.assertRaises(TypeError, check_variable)

            def check_dtype():
                # the input dtype of dropout must be float16, float32 or
                # float64 (float16 only on GPU); int32 must be rejected.
                int_input = fluid.layers.data(
                    name='x2', shape=[3, 4, 5, 6], dtype="int32")
                fluid.layers.dropout(int_input, dropout_prob=0.5)
            self.assertRaises(TypeError, check_dtype)
if __name__ == '__main__':
    unittest.main()  # run all dropout op tests when executed directly
| {
"content_hash": "6201fbd289861e143ced301ffa9264b6",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 82,
"avg_line_length": 30.84536082474227,
"alnum_prop": 0.5412767379679144,
"repo_name": "chengduoZH/Paddle",
"id": "08ec1fce8d3dad06c10686914fd1f834076cef8e",
"size": "6597",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_dropout_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
} |
from supybot.test import *
class TopicTestCase(ChannelPluginTestCase):
    """Functional tests for the supybot Topic plugin.

    Each test drives the bot through IRC-style 'topic ...' commands and
    asserts on the resulting TOPIC messages or error replies.
    """
    plugins = ('Topic','User',)
    def testRemove(self):
        """Remove entries by 1-based index; 0 and out-of-range are errors."""
        self.assertError('topic remove 1')
        _ = self.getMsg('topic add foo')
        _ = self.getMsg('topic add bar')
        _ = self.getMsg('topic add baz')
        self.assertError('topic remove 0')
        self.assertNotError('topic remove 3')
        self.assertNotError('topic remove 2')
        self.assertNotError('topic remove 1')
        self.assertError('topic remove 1')
    def testReplace(self):
        """Replace supports positive and negative (from-the-end) indices."""
        _ = self.getMsg('topic add foo')
        _ = self.getMsg('topic add bar')
        _ = self.getMsg('topic add baz')
        self.assertRegexp('topic replace 1 oof', 'oof.*bar.*baz')
        self.assertRegexp('topic replace -1 zab', 'oof.*bar.*zab')
        self.assertRegexp('topic replace 2 lorem ipsum',
                          'oof.*lorem ipsum.*zab')
        self.assertRegexp('topic replace 2 rab', 'oof.*rab.*zab')
    def testGet(self):
        """Get returns a single entry; index 0 is invalid."""
        self.assertError('topic get 1')
        _ = self.getMsg('topic add foo')
        _ = self.getMsg('topic add bar')
        _ = self.getMsg('topic add baz')
        self.assertRegexp('topic get 1', '^foo')
        self.assertError('topic get 0')
    def testAdd(self):
        """Add appends '<entry> (nick)' and joins entries with ' || '."""
        self.assertError('topic add #floorgle')
        m = self.getMsg('topic add foo')
        self.assertEqual(m.command, 'TOPIC')
        self.assertEqual(m.args[0], self.channel)
        self.assertEqual(m.args[1], 'foo (test)')
        m = self.getMsg('topic add bar')
        self.assertEqual(m.command, 'TOPIC')
        self.assertEqual(m.args[0], self.channel)
        self.assertEqual(m.args[1], 'foo (test) || bar (test)')
    def testManageCapabilities(self):
        """Without the manage capability, topic edits are rejected."""
        try:
            self.irc.feedMsg(ircmsgs.mode(self.channel, args=('+o', self.nick),
                                          prefix=self.prefix))
            # NOTE(review): ('+t') is a plain string, not a 1-tuple -- if
            # ircmsgs.mode iterates args this sends '+','t' separately;
            # probably meant ('+t',). Confirm intended.
            self.irc.feedMsg(ircmsgs.mode(self.channel, args=('+t'),
                                          prefix=self.prefix))
            world.testing = False
            origuser = self.prefix
            self.prefix = 'stuff!stuff@stuff'
            self.assertNotError('register nottester stuff', private=True)
            self.assertError('topic add foo')
            origconf = conf.supybot.plugins.Topic.requireManageCapability()
            conf.supybot.plugins.Topic.requireManageCapability.setValue('')
            self.assertNotError('topic add foo')
        finally:
            world.testing = True
            self.prefix = origuser
            conf.supybot.plugins.Topic.requireManageCapability.setValue(origconf)
    def testInsert(self):
        """Insert prepends instead of appending."""
        m = self.getMsg('topic add foo')
        self.assertEqual(m.args[1], 'foo (test)')
        m = self.getMsg('topic insert bar')
        self.assertEqual(m.args[1], 'bar (test) || foo (test)')
    def testChange(self):
        """Change applies an s/// substitution to one entry."""
        _ = self.getMsg('topic add foo')
        _ = self.getMsg('topic add bar')
        _ = self.getMsg('topic add baz')
        self.assertRegexp('topic change -1 s/baz/biff/',
                          r'foo.*bar.*biff')
        self.assertRegexp('topic change 2 s/bar/baz/',
                          r'foo.*baz.*biff')
        self.assertRegexp('topic change 1 s/foo/bar/',
                          r'bar.*baz.*biff')
        self.assertRegexp('topic change -2 s/baz/bazz/',
                          r'bar.*bazz.*biff')
        self.assertError('topic change 0 s/baz/biff/')
    def testConfig(self):
        """A custom separator value is honored when composing the topic."""
        try:
            original = conf.supybot.plugins.Topic.separator()
            conf.supybot.plugins.Topic.separator.setValue(' <==> ')
            _ = self.getMsg('topic add foo')
            m = self.getMsg('topic add bar')
            # NOTE(review): failUnless is a deprecated alias of assertTrue.
            self.failUnless('<==>' in m.args[1])
        finally:
            conf.supybot.plugins.Topic.separator.setValue(original)
    def testReorder(self):
        """Reorder permutes entries; duplicates/partial permutations error."""
        _ = self.getMsg('topic add foo')
        _ = self.getMsg('topic add bar')
        _ = self.getMsg('topic add baz')
        self.assertRegexp('topic reorder 2 1 3', r'bar.*foo.*baz')
        self.assertRegexp('topic reorder 3 -2 1', r'baz.*foo.*bar')
        self.assertError('topic reorder 0 1 2')
        self.assertError('topic reorder 1 -2 2')
        self.assertError('topic reorder 1 2')
        self.assertError('topic reorder 2 3 4')
        self.assertError('topic reorder 1 2 2')
        self.assertError('topic reorder 1 1 2 3')
        _ = self.getMsg('topic remove 1')
        _ = self.getMsg('topic remove 1')
        self.assertError('topic reorder 1')
        _ = self.getMsg('topic remove 1')
        self.assertError('topic reorder 0')
    def testList(self):
        """List enumerates entries as '1: foo ... and N: last'."""
        _ = self.getMsg('topic add foo')
        self.assertRegexp('topic list', '1: foo')
        _ = self.getMsg('topic add bar')
        self.assertRegexp('topic list', '1: foo .*2: bar')
        _ = self.getMsg('topic add baz')
        self.assertRegexp('topic list', '1: foo .* 2: bar .* and 3: baz')
    def testSet(self):
        """Set replaces one entry (by index) or the whole topic."""
        _ = self.getMsg('topic add foo')
        self.assertRegexp('topic set -1 bar', 'bar')
        self.assertNotRegexp('topic set -1 baz', 'bar')
        self.assertResponse('topic set foo bar baz', 'foo bar baz')
        # Catch a bug we had where setting topic 1 would reset the whole topic
        orig = conf.supybot.plugins.Topic.format()
        sep = conf.supybot.plugins.Topic.separator()
        try:
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertResponse('topic add baz', 'foo bar baz%sbaz' % sep)
            self.assertResponse('topic set 1 bar', 'bar%sbaz' % sep)
        finally:
            conf.supybot.plugins.Topic.format.setValue(orig)
    def testUndo(self):
        """Undo steps back through topic history one change at a time."""
        try:
            original = conf.supybot.plugins.Topic.format()
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertResponse('topic set ""', '')
            self.assertResponse('topic add foo', 'foo')
            self.assertResponse('topic add bar', 'foo || bar')
            self.assertResponse('topic add baz', 'foo || bar || baz')
            self.assertResponse('topic undo', 'foo || bar')
            self.assertResponse('topic undo', 'foo')
            self.assertResponse('topic undo', '')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
    def testUndoRedo(self):
        """Undo/redo walk the history in both directions, interleaved."""
        try:
            original = conf.supybot.plugins.Topic.format()
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertResponse('topic set ""', '')
            self.assertResponse('topic add foo', 'foo')
            self.assertResponse('topic add bar', 'foo || bar')
            self.assertResponse('topic add baz', 'foo || bar || baz')
            self.assertResponse('topic undo', 'foo || bar')
            self.assertResponse('topic undo', 'foo')
            self.assertResponse('topic undo', '')
            self.assertResponse('topic redo', 'foo')
            self.assertResponse('topic redo', 'foo || bar')
            self.assertResponse('topic redo', 'foo || bar || baz')
            self.assertResponse('topic undo', 'foo || bar')
            self.assertResponse('topic undo', 'foo')
            self.assertResponse('topic redo', 'foo || bar')
            self.assertResponse('topic undo', 'foo')
            self.assertResponse('topic redo', 'foo || bar')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
    def testSwap(self):
        """Swap exchanges two entries; swapping an entry with itself errors."""
        original = conf.supybot.plugins.Topic.format()
        try:
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertResponse('topic set ""', '')
            self.assertResponse('topic add foo', 'foo')
            self.assertResponse('topic add bar', 'foo || bar')
            self.assertResponse('topic add baz', 'foo || bar || baz')
            self.assertResponse('topic swap 1 2', 'bar || foo || baz')
            self.assertResponse('topic swap 1 -1', 'baz || foo || bar')
            self.assertError('topic swap -1 -1')
            self.assertError('topic swap 2 -2')
            self.assertError('topic swap 1 -3')
            self.assertError('topic swap -2 2')
            self.assertError('topic swap -3 1')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
    def testDefault(self):
        """Default applies the configured default topic (error if unset)."""
        self.assertError('topic default')
        try:
            original = conf.supybot.plugins.Topic.default()
            conf.supybot.plugins.Topic.default.setValue('foo bar baz')
            self.assertResponse('topic default', 'foo bar baz')
        finally:
            conf.supybot.plugins.Topic.default.setValue(original)
    def testTopic(self):
        """Bare 'topic' echoes the current topic; extra args are an error."""
        original = conf.supybot.plugins.Topic.format()
        try:
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertError('topic addd') # Error to send too many args.
            self.assertResponse('topic add foo', 'foo')
            self.assertResponse('topic add bar', 'foo || bar')
            self.assertResponse('topic', 'foo || bar')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
    def testSeparator(self):
        """Changing the separator rewrites the existing topic in place."""
        original = conf.supybot.plugins.Topic.format()
        try:
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.assertResponse('topic add foo', 'foo')
            self.assertResponse('topic add bar', 'foo || bar')
            self.assertResponse('topic add baz', 'foo || bar || baz')
            self.assertResponse('topic separator |', 'foo | bar | baz')
            self.assertResponse('topic separator ::', 'foo :: bar :: baz')
            self.assertResponse('topic separator ||', 'foo || bar || baz')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
    def testFit(self):
        """Fit drops oldest entries to stay within the server's TOPICLEN."""
        original = conf.supybot.plugins.Topic.format()
        try:
            conf.supybot.plugins.Topic.format.setValue('$topic')
            self.irc.state.supported['TOPICLEN'] = 20
            self.assertResponse('topic fit foo', 'foo')
            self.assertResponse('topic fit bar', 'foo || bar')
            self.assertResponse('topic fit baz', 'foo || bar || baz')
            self.assertResponse('topic fit qux', 'bar || baz || qux')
        finally:
            conf.supybot.plugins.Topic.format.setValue(original)
            self.irc.state.supported.pop('TOPICLEN', None)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "d0f7c062f391695fe02eb4e74b5bac36",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 81,
"avg_line_length": 44.294117647058826,
"alnum_prop": 0.5762663631189527,
"repo_name": "buildbot/supybot",
"id": "83adcfaee687088bb4815b1108e40b8a939af6d3",
"size": "12131",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "plugins/Topic/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2026939"
}
],
"symlink_target": ""
} |
import rospy
import math
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
class JoyTwist(object):
    """Convert joystick messages into velocity commands.

    Subscribes to ``joy`` and, while button 0 is held down, publishes a
    Twist on ``/cmd_vel`` driven by the first two stick axes.
    """

    def __init__(self):
        self._joy_sub = rospy.Subscriber('joy', Joy, self.joy_callback, queue_size=1)
        self._twist_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)

    def joy_callback(self, joy_msg):
        """Publish a Twist scaled from the stick axes while button 0 is pressed."""
        if joy_msg.buttons[0] != 1:
            return
        cmd = Twist()
        cmd.linear.x = joy_msg.axes[1] * 0.1
        cmd.angular.z = joy_msg.axes[0] * math.pi * 0.5
        self._twist_pub.publish(cmd)
if __name__ == '__main__':
    rospy.init_node('joy_twist')  # register this process with the ROS master
    joy_twist = JoyTwist()        # wires up the subscriber and publisher
    rospy.spin()                  # hand control to the ROS callback loop
| {
"content_hash": "11a283be3cb64a0b6ee3031963d3f315",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 87,
"avg_line_length": 31.904761904761905,
"alnum_prop": 0.5940298507462687,
"repo_name": "kato-masahiro/raspimouse_maze_manual",
"id": "43cf2550196fb601b5410a14ffba60cec95eaddd",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/controller.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "6241"
},
{
"name": "Python",
"bytes": "13030"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
} |
import config
from turkic.cli import handler
@handler("import")
def importstuff(args):
pass
@handler()
def dump(args):
    """Skeleton 'dump' subcommand; fill in export logic here."""
    pass
| {
"content_hash": "607af60aca788c93c053d612001b6e79",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 30,
"avg_line_length": 13.4,
"alnum_prop": 0.7089552238805971,
"repo_name": "johndoherty/turkic",
"id": "81898d98b7c917f657de5bf8a99038d12a078d60",
"size": "134",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "turkic/skeleton/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1961"
},
{
"name": "HTML",
"bytes": "319"
},
{
"name": "JavaScript",
"bytes": "18708"
},
{
"name": "Python",
"bytes": "57832"
}
],
"symlink_target": ""
} |
from modules import BaseModule
import re
import random
class EightBallModule(BaseModule):
    """Pick one checkbox option at random from a '[ ] a [ ] b' message."""

    def do8ball(self, message):
        """Return the message with exactly one '[ ]' turned into '[x]'."""
        fragments = re.split("(\[\s\]\s[^\[\]]+)+?\s?", message)
        options = [frag for frag in fragments if len(frag) > 0]
        # randint (not randrange) to keep the exact same RNG draw sequence.
        winner = random.randint(0, len(options) - 1)
        parts = []
        for idx, option in enumerate(options):
            if idx == winner:
                tail = option[option.find("]") + 1:]
                parts.append(" [x]" + tail)
            else:
                parts.append(" " + option)
        return "".join(parts)

    def onprivmsg(self, conn, sender, to, message):
        """Respond to checkbox messages, either in-channel or via query."""
        if re.match("^((?:\[\s\]\s[^\[\]]+\s?)+)", message) is None:
            return
        result = self.do8ball(message)
        rcpt = sender if to == conn.nick else to
        conn.privmsg(rcpt, sender + ":" + result)
| {
"content_hash": "7eb10f635d1e6237b93211c2ecfd419b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 27.870967741935484,
"alnum_prop": 0.4791666666666667,
"repo_name": "hrkfdn/cherry",
"id": "9ed9771dd81ead7a158776166283b5fce9238372",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mod_8ball.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24588"
}
],
"symlink_target": ""
} |
from pylab import *
class Container:
    """Abstract base for 2-D regions that can test point membership."""

    def contains(self, point):
        """Return True if *point* (an (x, y) pair) lies inside the region.

        Subclasses must override this method.
        """
        # String exceptions (``raise "..."``) were removed in Python 2.6 and
        # raise a TypeError themselves; use a real exception type. The
        # explicit single *point* parameter also replaces the tuple-unpack
        # signature removed by PEP 3113 -- callers still pass one pair.
        raise NotImplementedError("Container is an abstract class")
class Rectangle(Container):
    """Axis-aligned rectangle covering [x0, x1] x [y0, y1]."""

    def __init__(self, x0, x1, y0, y1):
        self.x0 = x0
        self.x1 = x1
        self.y0 = y0
        self.y1 = y1
        self.width = x1 - x0
        self.height = y1 - y0

    def contains(self, point):
        """Return True if *point* = (x, y) lies inside or on the boundary.

        Explicit unpacking replaces ``def contains(self, (x, y))``, the
        tuple-parameter syntax removed by PEP 3113; callers are unchanged
        (they still pass a single (x, y) pair).
        """
        x, y = point
        return self.x0 <= x <= self.x1 and self.y0 <= y <= self.y1
class VelocityVector:
    """Mutable 2-D velocity with helpers to reflect off walls."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def magnitude(self):
        """Euclidean length of the vector."""
        return (self.x ** 2.0 + self.y ** 2.0) ** 0.5

    def invert_y(self):
        """Reflect vertically (bounce off a horizontal wall)."""
        self.y = -self.y

    def invert_x(self):
        """Reflect horizontally (bounce off a vertical wall)."""
        self.x = -self.x
class Particle:
    """A moving point: an (x, y) location plus a VelocityVector."""
    def __init__(self, location, velocity):
        self.location = location  # (x, y) tuple; replaced on every step
        self.velocity = velocity  # VelocityVector; mutated in place on bounces
class SquareSimulation:
    """Bounce a Particle around inside a rectangular Container."""
    def __init__(self, square, particle):
        self.square = square
        self.particle = particle
    def run(self, num_steps = 500):
        """Advance the particle *num_steps* times.

        Returns (locations, inversions): the visited (x, y) points and
        the sequence of axis labels ('x' or 'y'), one per wall bounce.
        """
        locations = []
        inversions = []
        for i in xrange(num_steps):  # xrange: this module targets Python 2
            x, y, inverted = self.step()
            locations.append((x,y))
            if inverted:
                inversions.append(inverted)
        return (locations, inversions)
    def step(self):
        """Move one velocity increment; return (x, y, inverted_axis_or_False)."""
        x, y = self.particle.location
        x += self.particle.velocity.x
        y += self.particle.velocity.y
        inverted = self.change_direction(x, y, self.particle.velocity)
        self.particle.location = (x,y)
        return (x, y, inverted)
    def change_direction(self, x, y, velocity, epsilon = 0.01):
        """Reflect *velocity* if (x, y) -- or a point epsilon further along
        the direction of travel -- has left the square.

        Returns the inverted axis label or False.
        # NOTE(review): the epsilon look-ahead presumably catches grazing
        # trajectories that would cross a wall between steps -- confirm.
        """
        inverted = self.invert_velocity(x, y, velocity)
        if inverted:
            return inverted
        perturbed_x = x + velocity.x * epsilon
        perturbed_y = y + velocity.y * epsilon
        inverted = self.invert_velocity(perturbed_x, perturbed_y, velocity)
        if inverted:
            return inverted
        return False
    def invert_velocity(self, x, y, velocity):
        """Mutate *velocity* to bounce off whichever wall (x, y) crossed.

        Returns 'x' or 'y' for the reflected axis, False if still inside.
        """
        if not self.square.contains((x,y)):
            if x < self.square.x0 or x > self.square.x1:
                velocity.invert_x()
                return 'x'
            else:
                velocity.invert_y()
                return 'y'
        return False
def plot_locations(locations):
    """Plot a particle trajectory inside the unit square and show it."""
    xs = [pt[0] for pt in locations]
    ys = [pt[1] for pt in locations]
    ylim([0.0, 1.0])
    xlim([0.0, 1.0])
    plot(xs, ys)
    show()
def minimum_periodic_sequence(sequence):
    """Return the shortest prefix of *sequence* that tiles the whole of it.

    Grows a candidate period one element at a time until repeating the
    candidate reproduces *sequence* (per valid_periodic_sequence).

    NOTE(review): relies on potential_periodic_sequence() returning the
    (mutated) candidate list; see the note on that helper's fall-through
    path, which returns None.
    """
    potential = []
    while len(potential) == 0 or not valid_periodic_sequence(sequence, potential):
        potential = potential_periodic_sequence(sequence, potential)
        potential.append(sequence[len(potential)])
    return potential
def valid_periodic_sequence(sequence, potential):
    """Return True if *sequence* equals *potential* tiled (possibly truncated).

    An empty *sequence* is vacuously periodic. *potential* must be
    non-empty when *sequence* is non-empty (i % k below).
    """
    k = len(potential)
    # ``range`` instead of the Python-2-only ``xrange``: identical iteration
    # semantics under Python 2, and also works under Python 3.
    for i in range(len(sequence)):
        if sequence[i] != potential[i % k]:
            return False
    return True
def potential_periodic_sequence(sequence, potential_sequence):
    """Grow *potential_sequence* (in place) toward a period candidate.

    Returns the candidate list once the match counter catches up with its
    length.

    NOTE(review): if the loop finishes without taking the early return,
    the function implicitly returns None even though the list was
    mutated -- the caller in minimum_periodic_sequence would then crash
    on .append(). Confirm that path is unreachable for the inputs used.
    """
    counter = 0
    for val in sequence:
        if len(potential_sequence) > 0 and val == sequence[counter]:
            counter += 1
            if counter >= len(potential_sequence):
                return potential_sequence
        else:
            # Mismatch: absorb the partially-matched prefix plus this value.
            potential_sequence.extend(sequence[:counter])
            potential_sequence.append(val)
            counter = 0
if __name__ == '__main__':
    # Unit square with a small, slightly skewed velocity; 6000 steps gives
    # the bounce pattern room to repeat.
    square = Rectangle(0.0, 1.0, 0.0, 1.0)
    velocity = VelocityVector(0.0023, 0.0005)
    particle = Particle((0.75, 0.75), velocity)
    simulation = SquareSimulation(square, particle)
    locations, inversions = simulation.run(6000)
    print minimum_periodic_sequence(inversions)  # Python 2 print statement
    plot_locations(locations)
| {
"content_hash": "666b780a395637b318d19f05954fb403",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 82,
"avg_line_length": 27.551470588235293,
"alnum_prop": 0.5764611689351481,
"repo_name": "spectralflight/billiards",
"id": "afdc44cd7781e496157837b82f1ff0e6f1c8220e",
"size": "3747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4330"
},
{
"name": "TeX",
"bytes": "58299"
}
],
"symlink_target": ""
} |
import os
__all__ = ['test_client']  # modules exported via ``from ... import *``
# Useful Constants
INSTALL_DIR = os.path.dirname(__file__)  # directory holding this test package
DATA_DIR = os.path.join(INSTALL_DIR, 'data')  # test fixture files live here
| {
"content_hash": "28373c1845e654c02ec3c337f615a01e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 44,
"avg_line_length": 20.285714285714285,
"alnum_prop": 0.6619718309859155,
"repo_name": "threerings/splatd",
"id": "11848b327ed2d9c5c3f96d10fe527da87d1cf4c5",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splat/ldaputils/test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "176541"
}
],
"symlink_target": ""
} |
import subprocess
import tkinter as tk
from math import sqrt, ceil
class Application(tk.Frame):
    """Grid of launcher buttons read from ``buttons.txt``.

    Each line of ``buttons.txt`` has the form ``Label = command args``;
    pressing a button runs the command and reports success/failure in a
    status text field at the bottom of the grid.
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.pack()
        self.create_widgets()

    def create_widgets(self):
        """Build the button grid plus a QUIT button and a status line."""
        # with-statement closes the file deterministically; the original
        # leaked the open handle.
        with open("buttons.txt") as button_file:
            buttons = list(button_file)
        # Smallest square grid that fits all buttons plus the QUIT button.
        max_num = int(ceil(sqrt(len(buttons) + 1)))
        r = 0
        c = 0
        for button in buttons:
            button = button[:-1]  # strip the trailing newline
            info = button.split(" = ")
            # Bind label/command through default args so each button keeps
            # its own values (late-binding closure pitfall).
            self.button = tk.Button(
                self, height=10, width=15, text=info[0],
                command=lambda x=info[0], y=info[1]: self.command_parser(x, y))
            self.button.grid(row=r, column=c)
            if c >= max_num - 1:
                c = 0
                r += 1
            else:
                c += 1
        self.quit = tk.Button(self, text="QUIT", fg="red",
                              command=root.destroy, height=10, width=15)
        self.quit.grid(row=max_num - 1, column=max_num - 1)
        self.T = tk.Text(self, height=1, width=max_num * 15)
        self.T.tag_config('center', justify=tk.CENTER)
        self.T.grid(row=max_num, columnspan=max_num)
        self.T.insert(tk.END, "Select Command", 'center')

    def command_parser(self, name, command):
        """Run *command*; a trailing '&' means fire-and-forget via Popen."""
        command_args = command.split(" ")
        if command_args[-1] == '&':
            # Drop the '&' marker -- it is a backgrounding flag for us, not
            # an argument for the child process (the original passed it on).
            subprocess.Popen(command_args[:-1])
            self.T.delete(1.0, tk.END)
            self.T.insert(tk.END, name + " successful", 'center')
            return
        ret_code = subprocess.call(command_args)
        self.T.delete(1.0, tk.END)
        if ret_code == 0:
            self.T.insert(tk.END, name + " successful", 'center')
        else:
            self.T.insert(tk.END, name + " Failed", 'center')

    def say_hi(self):
        """Demo handler kept for compatibility; unused by the grid."""
        print("hi there, everyone!")
root = tk.Tk()
root.wm_title("Surface Helper")  # window title shown by the window manager
app = Application(master=root)
app.mainloop()  # blocks until the window is closed
| {
"content_hash": "882094759790319796b7bfaa5ef86ae2",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 144,
"avg_line_length": 38.18840579710145,
"alnum_prop": 0.5259962049335863,
"repo_name": "NickTGraham/PythonPack",
"id": "9919c1c84c83202615bcd1a8ff213061b42b9d7f",
"size": "2922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Surface/testGUI.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27929"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
"""Support for the Oracle database via the zxjdbc JDBC connector.
JDBC Driver
-----------
The official Oracle JDBC driver is at
http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
    """Date type that trims the time component from zxJDBC results."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # zxJDBC hands back datetime-like objects; keep only the date.
            if value is None:
                return None
            return value.date()
        return process
class _ZxJDBCNumeric(sqltypes.Numeric):
    """Numeric type normalizing zxJDBC results to Decimal or float."""

    def result_processor(self, dialect, coltype):
        # XXX: does the dialect return Decimal or not???
        # if it does (in all cases), we could use a None processor as well as
        # the to_float generic processor
        if self.asdecimal:
            def process(value):
                # Coerce non-Decimal values via str to avoid float artifacts.
                if isinstance(value, decimal.Decimal):
                    return value
                return decimal.Decimal(str(value))
        else:
            def process(value):
                # Downgrade Decimals to plain floats; pass others through.
                if isinstance(value, decimal.Decimal):
                    return float(value)
                return value
        return process
class OracleCompiler_zxjdbc(OracleCompiler):
    """Statement compiler that renders ``RETURNING ... INTO`` for zxJDBC."""
    def returning_clause(self, stmt, returning_cols):
        """Render the RETURNING clause and register its bind parameters.

        Each returned column gets a ReturningParam-valued bindparam so the
        DataHandler can call registerReturnParameter() at execute time.
        """
        self.returning_cols = list(expression._select_iterables(returning_cols))
        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
                   for c in self.returning_cols]
        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []
        binds = []
        for i, col in enumerate(self.returning_cols):
            # 1-based JDBC parameter index paired with its DBAPI type.
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))
            bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
    """Execution context that drains JDBC RETURNING result sets."""
    def pre_exec(self):
        if hasattr(self.compiled, 'returning_parameters'):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)
    def get_result_proxy(self):
        """Build a result proxy; consume the RETURNING result set if any."""
        if hasattr(self.compiled, 'returning_parameters'):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    rrs.next()
                except SQLException, sqle:
                    # Translate the Java exception into a DBAPI-level error.
                    msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
                    if sqle.getSQLState() is not None:
                        msg += ' [SQLState: %s]' % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                                for index, dbtype in self.compiled.returning_parameters)
                    return ReturningResultProxy(self, row)
            finally:
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        # best-effort close; ignore failures
                        pass
                self.statement.close()
        return base.ResultProxy(self)
    def create_cursor(self):
        # Wrap the cursor's datahandler so ReturningParam binds are handled.
        cursor = self._connection.connection.cursor()
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor
class ReturningResultProxy(base.FullyBufferedResultProxy):
    """ResultProxy backed by the RETURNING ResultSet results."""

    def __init__(self, context, returning_row):
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        """Synthesize a cursor description from the RETURNING columns."""
        described = []
        for col in self.context.compiled.returning_cols:
            if hasattr(col, 'name'):
                described.append((col.name, col.type))
            else:
                described.append((col.anon_label, col.type))
        return described

    def _buffer_rows(self):
        # The single RETURNING row is the entire buffered result set.
        return [self._returning_row]
class ReturningParam(object):
    """A bindparam value representing a RETURNING parameter.

    Specially handled by OracleReturningDataHandler.
    """

    def __init__(self, type):
        self.type = type

    def __eq__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type == other.type

    def __ne__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type != other.type

    def __repr__(self):
        kls = self.__class__
        return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__,
                                                   id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    """Oracle dialect speaking the zxJDBC (Jython) DBAPI."""
    jdbc_db_name = 'oracle'
    jdbc_driver_name = 'oracle.jdbc.OracleDriver'
    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc
    # Override Date/Numeric handling with the zxJDBC-aware types above.
    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {
            sqltypes.Date : _ZxJDBCDate,
            sqltypes.Numeric: _ZxJDBCNumeric
        }
    )
    def __init__(self, *args, **kwargs):
        """Lazily import the Java-side classes (Jython only) and build a
        DataHandler subclass aware of ReturningParam."""
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler
        class OracleReturningDataHandler(OracleDataHandler):
            """zxJDBC DataHandler that specially handles ReturningParam."""
            def setJDBCObject(self, statement, index, object, dbtype=None):
                # ReturningParam marks an OUT-style RETURNING slot rather
                # than an ordinary bind value.
                if type(object) is ReturningParam:
                    statement.registerReturnParameter(index, object.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(self, statement, index, object)
                else:
                    OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype)
        self.DataHandler = OracleReturningDataHandler
    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        # NOTE(review): lexicographic string comparison of driver versions;
        # e.g. '9.9' > '10.2' -- confirm driverversion format makes this safe.
        self.implicit_returning = connection.connection.driverversion >= '10.2'
    def _create_jdbc_url(self, url):
        """Format a thin-driver JDBC URL; port defaults to 1521."""
        return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)
    def _get_server_version_info(self, connection):
        """Parse 'Release x.y.z' out of dbversion into an int tuple."""
        version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
        return tuple(int(x) for x in version.split('.'))
# Module-level entry point looked up by SQLAlchemy's dialect loader.
dialect = OracleDialect_zxjdbc
| {
"content_hash": "1dd3c32d41828bbf02ab6ace89a38211",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 97,
"avg_line_length": 35.3732057416268,
"alnum_prop": 0.6067902069525226,
"repo_name": "simplegeo/sqlalchemy",
"id": "d742654a0d0fee70d438484d76ce048bb462848c",
"size": "7393",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/dialects/oracle/zxjdbc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30110"
},
{
"name": "JavaScript",
"bytes": "26336"
},
{
"name": "Python",
"bytes": "5012225"
}
],
"symlink_target": ""
} |
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
    """Return the product of the numbers in *x*; 1 for an empty sequence.

    ~40x faster than np.prod for plain Python tuples.
    """
    if not x:
        return 1
    return functools.reduce(operator.mul, x)
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.
    Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.
    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.
    Parameters
    ----------
    x : array_like
        `x` represents the x-coordinates of a set of datapoints.
    w : array_like
        `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.
    """
    n = len(x)
    p = poly1d(0.0)
    # Sum one basis term per data point:
    #   w[j] * prod_{k != j} (X - x[k]) / (x[j] - x[k])
    for j in range(n):
        term = poly1d(w[j])
        for k in range(n):
            if k != j:
                term *= poly1d([1.0, -x[k]]) / (x[j] - x[k])
        p += term
    return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)
    Interpolate over a 2-D grid.
    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.
    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.
    Note that calling `interp2d` with NaNs present in input values results in
    undefined behaviour.
    Methods
    -------
    __call__
    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.
        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::
            >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::
            >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.
    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function
    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.
    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.
    Examples
    --------
    Construct a 2-D grid and interpolate on it:
    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')
    Now use the obtained interpolation function and plot the result:
    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """
    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)
        # "Rectangular grid": x and y are the grid axes and z holds one value
        # per (x, y) pair; otherwise x, y, z are parallel lists of points.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
                # The gridded FITPACK routine needs ascending axes; sort each
                # axis and permute the matching axis of z to stay consistent.
                if not np.all(x[1:] >= x[:-1]):
                    j = np.argsort(x)
                    x = x[j]
                    z = z[:, j]
                if not np.all(y[1:] >= y[:-1]):
                    j = np.argsort(y)
                    y = y[j]
                    z = z[j, :]
            # Flatten with x varying fastest (transpose first).
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")
        try:
            # Spline degree along each axis, from the interpolation kind.
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")
        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            # Gridded data: build an interpolating spline (smoothing s=0).
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            # Trim knot/coefficient arrays to their used lengths.
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)
        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
        # Cached data bounds for out-of-range checks in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)
    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.
        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.
        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """
        x = atleast_1d(x)
        y = atleast_1d(y)
        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")
        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)
        # Out-of-range masks are only needed when we will either raise on, or
        # fill in, out-of-bounds points.
        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)
        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))
        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # Transpose so the result matches the documented (len(y), len(x)).
        z = transpose(z)
        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value
        # Squeeze a single-row result down to 1-D.
        if len(z) == 1:
            z = z[0]
        return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
    """Helper to check if fill_value == "extrapolate" without warnings"""
    # Only compare for equality when fill_value really is a string, so an
    # array-valued fill_value never triggers elementwise comparison.
    if not isinstance(fill_value, string_types):
        return False
    return fill_value == 'extrapolate'
class interp1d(_Interpolator1D):
    """
    Interpolate a 1-D function.
    `x` and `y` are arrays of values used to approximate some function f:
    ``y = f(x)``. This class returns a function whose call method uses
    interpolation to find the value of new points.
    Note that calling `interp1d` with NaNs present in input values results in
    undefined behaviour.
    Parameters
    ----------
    x : (N,) array_like
        A 1-D array of real values.
    y : (...,N,...) array_like
        A N-D array of real values. The length of `y` along the interpolation
        axis must be equal to the length of `x`.
    kind : str or int, optional
        Specifies the kind of interpolation as a string
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
        where 'slinear', 'quadratic' and 'cubic' refer to a spline
        interpolation of first, second or third order) or as an integer
        specifying the order of the spline interpolator to use.
        Default is 'linear'.
    axis : int, optional
        Specifies the axis of `y` along which to interpolate.
        Interpolation defaults to the last axis of `y`.
    copy : bool, optional
        If True, the class makes internal copies of x and y.
        If False, references to `x` and `y` are used. The default is to copy.
    bounds_error : bool, optional
        If True, a ValueError is raised any time interpolation is attempted on
        a value outside of the range of x (where extrapolation is
        necessary). If False, out of bounds values are assigned `fill_value`.
        By default, an error is raised unless `fill_value="extrapolate"`.
    fill_value : array-like or (array-like, array_like) or "extrapolate", optional
        - if a ndarray (or float), this value will be used to fill in for
          requested points outside of the data range. If not provided, then
          the default is NaN. The array-like must broadcast properly to the
          dimensions of the non-interpolation axes.
        - If a two-element tuple, then the first element is used as a
          fill value for ``x_new < x[0]`` and the second element is used for
          ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
          list or ndarray, regardless of shape) is taken to be a single
          array-like argument meant to be used for both bounds as
          ``below, above = fill_value, fill_value``.
          .. versionadded:: 0.17.0
        - If "extrapolate", then points outside the data range will be
          extrapolated.
          .. versionadded:: 0.17.0
    assume_sorted : bool, optional
        If False, values of `x` can be in any order and they are sorted first.
        If True, `x` has to be an array of monotonically increasing values.
    Methods
    -------
    __call__
    See Also
    --------
    splrep, splev
        Spline interpolation/smoothing based on FITPACK.
    UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
    interp2d : 2-D interpolation
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import interpolate
    >>> x = np.arange(0, 10)
    >>> y = np.exp(-x/3.0)
    >>> f = interpolate.interp1d(x, y)
    >>> xnew = np.arange(0, 9, 0.1)
    >>> ynew = f(xnew)   # use interpolation function returned by `interp1d`
    >>> plt.plot(x, y, 'o', xnew, ynew, '-')
    >>> plt.show()
    """
    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=None, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)
        self.bounds_error = bounds_error  # used by fill_value setter
        self.copy = copy
        # Normalize `kind` to either 'linear'/'nearest' or ('spline', order).
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'nearest': 0, 'zero': 0, 'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)
        if not assume_sorted:
            # Sort x and permute y along the interpolation axis to match.
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)
        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")
        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)
        # Backward compatibility
        self.axis = axis % y.ndim
        # Interpolation goes internally along the first axis
        self.y = y
        self._y = self._reshape_yi(self.y)
        self.x = x
        del y, x  # clean up namespace to prevent misuse; use attributes
        self._kind = kind
        self.fill_value = fill_value  # calls the setter, can modify bounds_err
        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection.  See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the interpolation
            # axis.
            minval = 2
            if kind == 'nearest':
                # Do division before addition to prevent possible integer
                # overflow
                self.x_bds = self.x / 2.0
                # Midpoints between breakpoints: the decision boundaries for
                # nearest-neighbor lookup.
                self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
                self._call = self.__class__._call_nearest
            else:
                # Check if we can delegate to numpy.interp (2x-10x faster).
                cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
                cond = cond and self.y.ndim == 1
                cond = cond and not _do_extrapolate(fill_value)
                if cond:
                    self._call = self.__class__._call_linear_np
                else:
                    self._call = self.__class__._call_linear
        else:
            # Spline of order k needs at least k+1 data points.
            minval = order + 1
            rewrite_nan = False
            xx, yy = self.x, self._y
            if order > 1:
                # Quadratic or cubic spline. If input contains even a single
                # nan, then the output is all nans. We cannot just feed data
                # with nans to make_interp_spline because it calls LAPACK.
                # So, we make up a bogus x and y with no nans and use it
                # to get the correct shape of the output, which we then fill
                # with nans.
                # For slinear or zero order spline, we just pass nans through.
                if np.isnan(self.x).any():
                    xx = np.linspace(min(self.x), max(self.x), len(self.x))
                    rewrite_nan = True
                if np.isnan(self._y).any():
                    yy = np.ones_like(self._y)
                    rewrite_nan = True
            self._spline = make_interp_spline(xx, yy, k=order,
                                              check_finite=False)
            if rewrite_nan:
                self._call = self.__class__._call_nan_spline
            else:
                self._call = self.__class__._call_spline
        if len(self.x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)
    @property
    def fill_value(self):
        # backwards compat: mimic a public attribute
        return self._fill_value_orig
    @fill_value.setter
    def fill_value(self, fill_value):
        # extrapolation only works for nearest neighbor and linear methods
        if _do_extrapolate(fill_value):
            if self.bounds_error:
                raise ValueError("Cannot extrapolate and raise "
                                 "at the same time.")
            self.bounds_error = False
            self._extrapolate = True
        else:
            # Shape of y with the interpolation axis removed; fill values
            # must broadcast up to this shape.
            broadcast_shape = (self.y.shape[:self.axis] +
                               self.y.shape[self.axis + 1:])
            if len(broadcast_shape) == 0:
                broadcast_shape = (1,)
            # it's either a pair (_below_range, _above_range) or a single value
            # for both above and below range
            if isinstance(fill_value, tuple) and len(fill_value) == 2:
                below_above = [np.asarray(fill_value[0]),
                               np.asarray(fill_value[1])]
                names = ('fill_value (below)', 'fill_value (above)')
                for ii in range(2):
                    below_above[ii] = _check_broadcast_up_to(
                        below_above[ii], broadcast_shape, names[ii])
            else:
                fill_value = np.asarray(fill_value)
                below_above = [_check_broadcast_up_to(
                    fill_value, broadcast_shape, 'fill_value')] * 2
            self._fill_value_below, self._fill_value_above = below_above
            self._extrapolate = False
            if self.bounds_error is None:
                self.bounds_error = True
        # backwards compat: fill_value was a public attr; make it writeable
        self._fill_value_orig = fill_value
    def _call_linear_np(self, x_new):
        # Note that out-of-bounds values are taken care of in self._evaluate
        return np.interp(x_new, self.x, self.y)
    def _call_linear(self, x_new):
        # 2. Find where in the orignal data, the values to interpolate
        #    would be inserted.
        #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)
        # 3. Clip x_new_indices so that they are within the range of
        #    self.x indices and at least 1. Removes mis-interpolation
        #    of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x) - 1).astype(int)
        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices
        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]
        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope * (x_new - x_lo)[:, None] + y_lo
        return y_new
    def _call_nearest(self, x_new):
        """ Find nearest neighbour interpolated y_new = f(x_new)."""
        # 2. Find where in the averaged data the values to interpolate
        #    would be inserted.
        #    Note: use side='left' (right) to searchsorted() to define the
        #    halfway point to be nearest to the left (right) neighbour
        x_new_indices = searchsorted(self.x_bds, x_new, side='left')
        # 3. Clip x_new_indices so that they are within the range of x indices.
        x_new_indices = x_new_indices.clip(0, len(self.x) - 1).astype(intp)
        # 4. Calculate the actual value for each entry in x_new.
        y_new = self._y[x_new_indices]
        return y_new
    def _call_spline(self, x_new):
        # Delegate to the BSpline built in __init__.
        return self._spline(x_new)
    def _call_nan_spline(self, x_new):
        # Input data contained NaNs (see __init__); evaluate only to get the
        # correct output shape, then fill everything with NaN.
        out = self._spline(x_new)
        out[...] = np.nan
        return out
    def _evaluate(self, x_new):
        # 1. Handle values in x_new that are outside of x. Throw error,
        #    or return a list of mask array indicating the outofbounds values.
        #    The behavior is set by the bounds_error variable.
        x_new = asarray(x_new)
        # self._call holds an *unbound* function stored on the class (see
        # __init__), hence the explicit self argument.
        y_new = self._call(self, x_new)
        if not self._extrapolate:
            below_bounds, above_bounds = self._check_bounds(x_new)
            if len(y_new) > 0:
                # Note fill_value must be broadcast up to the proper size
                # and flattened to work here
                y_new[below_bounds] = self._fill_value_below
                y_new[above_bounds] = self._fill_value_above
        return y_new
    def _check_bounds(self, x_new):
        """Check the inputs for being in the bounds of the interpolated data.
        Parameters
        ----------
        x_new : array
        Returns
        -------
        out_of_bounds : bool array
            The mask on x_new of values that are out of the bounds.
        """
        # If self.bounds_error is True, we raise an error if any x_new values
        # fall outside the range of x. Otherwise, we return an array indicating
        # which values are outside the boundary region.
        below_bounds = x_new < self.x[0]
        above_bounds = x_new > self.x[-1]
        # !! Could provide more information about which values are out of bounds
        if self.bounds_error and below_bounds.any():
            raise ValueError("A value in x_new is below the interpolation "
                             "range.")
        if self.bounds_error and above_bounds.any():
            raise ValueError("A value in x_new is above the interpolation "
                             "range.")
        # !! Should we emit a warning if some values are out of bounds?
        # !! matlab does not.
        return below_bounds, above_bounds
class _PPolyBase(object):
    """Base class for piecewise polynomials.

    Stores coefficients ``c`` of shape (k, m, ...) — order-k pieces over m
    intervals — and breakpoints ``x`` of shape (m+1,).
    """
    __slots__ = ('c', 'x', 'extrapolate', 'axis')
    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)
        # extrapolate may be True/False or the string 'periodic'.
        if extrapolate is None:
            extrapolate = True
        elif extrapolate != 'periodic':
            extrapolate = bool(extrapolate)
        self.extrapolate = extrapolate
        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("%s must be between 0 and %s" % (axis, c.ndim - 1))
        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis != 0 means that we have c.shape (..., k, m, ...)
            # with the (k, m) pair starting at position `axis`.
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis + 1)
            self.c = np.rollaxis(self.c, axis + 1)
        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size - 1:
            raise ValueError("number of coefficients != len(x)-1")
        dx = np.diff(self.x)
        # NOTE(review): this only enforces monotonicity (>= / <=), not the
        # *strictness* the message claims — repeated breakpoints pass.
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` must be strictly increasing or decreasing.")
        # Promote coefficients to float/complex and make them C contiguous
        # for the Cython kernels.
        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)
    def _get_dtype(self, dtype):
        # Coefficients are kept as either complex or float, never integer.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_
    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.
        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type. The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self
    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user. The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
    def extend(self, c, x, right=None):
        """
        Add additional breakpoints and coefficients to the polynomial.
        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals. Note that
            the first additional interval will be formed using one of the
            `self.x` end points.
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted in the same order as
            `self.x` and either to the right or to the left of the current
            breakpoints.
        right
            Deprecated argument. Has no effect.
            .. deprecated:: 0.19
        """
        if right is not None:
            warnings.warn("`right` is deprecated and will be removed.")
        c = np.asarray(c)
        x = np.asarray(x)
        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")
        if c.size == 0:
            return
        dx = np.diff(x)
        if not (np.all(dx >= 0) or np.all(dx <= 0)):
            raise ValueError("`x` is not sorted.")
        if self.x[-1] >= self.x[0]:
            # self.x ascending: the new breakpoints must also ascend and lie
            # entirely to one side of the existing ones.
            if not x[-1] >= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")
            if x[0] >= self.x[-1]:
                action = 'append'
            elif x[-1] <= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        else:
            # self.x descending: mirror image of the checks above.
            if not x[-1] <= x[0]:
                raise ValueError("`x` is in the different order "
                                 "than `self.x`.")
            if x[0] <= self.x[-1]:
                action = 'append'
            elif x[-1] >= self.x[0]:
                action = 'prepend'
            else:
                raise ValueError("`x` is neither on the left or on the right "
                                 "from `self.x`.")
        dtype = self._get_dtype(c.dtype)
        # Pad the lower-order piece with zero rows at the top so both pieces
        # share the common order k2, then concatenate along the interval axis.
        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)
        if action == 'append':
            c2[k2 - self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2 - c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        elif action == 'prepend':
            c2[k2 - self.c.shape[0]:, :c.shape[1]] = c
            c2[k2 - c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]
        self.c = c2
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative.
        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.
        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
        # With periodic extrapolation we map x to the segment
        # [self.x[0], self.x[-1]].
        if extrapolate == 'periodic':
            x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
            extrapolate = False
        # Evaluate into a flat (npoints, nvalues) buffer, then restore the
        # trailing value dimensions.
        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim + self.axis] + l[:x_ndim] + l[x_ndim + self.axis:]
            out = out.transpose(l)
        return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,) * (c2.ndim - 1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.
        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.
        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        # A non-positive order means differentiation.
        if nu <= 0:
            return self.derivative(-nu)
        # Grow the coefficient array by nu rows; existing coefficients occupy
        # the high-order rows, the nu new low-order rows start at zero.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c
        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]
        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate
        # construct a compatible polynomial
        return self.construct_fast(c, self.x, extrapolate, self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.
        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.
        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        # Swap integration bounds if needed; remember the sign to restore
        # orientation of the integral at the end.
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        # Flatten trailing (vector-valued) dimensions into one axis for the
        # C-level routine; reshaped back before returning.
        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        # Compute the integral.
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.
            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            if n_periods > 0:
                # Integral over one full period, scaled by the period count.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xe, False, out=range_int)
                range_int *= n_periods
            else:
                range_int.fill(0)
            # Map a to [xs, xe], b is always a + left.
            a = xs + (a - xs) % period
            b = a + left
            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            remainder_int = np.empty_like(range_int)
            if b <= xe:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, b, False, out=remainder_int)
                range_int += remainder_int
            else:
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, a, xe, False, out=remainder_int)
                range_int += remainder_int
                # Wrapped-around piece: [xs, xs + (b - xe)], where
                # b - xe == left + a - xe.
                _ppoly.integrate(
                    self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                    self.x, xs, xs + left + a - xe, False, out=remainder_int)
                range_int += remainder_int
        else:
            _ppoly.integrate(
                self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                self.x, a, b, bool(extrapolate), out=range_int)
        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])
    def solve(self, y=0., discontinuity=True, extrapolate=None):
        """
        Find real solutions of the equation ``pp(x) == y``.
        Parameters
        ----------
        y : float, optional
            Right-hand side. Default is zero.
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : {bool, 'periodic', None}, optional
            If bool, determines whether to return roots from the polynomial
            extrapolated based on first and last intervals, 'periodic' works
            the same as False. If None (default), use `self.extrapolate`.
        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).
            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.
        Notes
        -----
        This routine works only on real-valued polynomials.
        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.
        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.
        Examples
        --------
        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        self._ensure_c_contiguous()
        # Root finding only supports real coefficients.
        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")
        y = float(y)
        # Trailing dimensions are flattened; the C routine returns one root
        # array per flattened polynomial.
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, y, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            # Scalar-valued polynomial: single root array.
            return r[0]
        else:
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root
            return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline
        Parameters
        ----------
        tck
            A spline, as returned by `splrep` or a BSpline object.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        if isinstance(tck, BSpline):
            t, c, k = tck.tck
            # Inherit extrapolation behavior from the BSpline unless overridden.
            if extrapolate is None:
                extrapolate = tck.extrapolate
        else:
            t, c, k = tck
        # Taylor coefficients on each knot interval: evaluate the spline's
        # m-th derivative at the interval's left edge and divide by m!
        # (spec.gamma(m + 1) == m!).
        cvals = np.empty((k + 1, len(t) - 1), dtype=c.dtype)
        for m in xrange(k, -1, -1):
            y = fitpack.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y / spec.gamma(m + 1)
        return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,) * (bp.c.ndim - 2)
c = np.zeros_like(bp.c)
for a in range(k + 1):
factor = (-1) ** a * comb(k, a) * bp.c[a]
for s in range(a, k + 1):
val = comb(k - a, s - a) * (-1) ** s
c[k - s] += factor * val / dx[(slice(None),) + rest] ** s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
    """Piecewise polynomial in terms of coefficients and breakpoints.
    The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
    Bernstein polynomial basis::
        S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
    where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
    with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
    coefficient.
    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. Must be sorted in either increasing or
        decreasing order.
    extrapolate : bool, optional
        If bool, determines whether to extrapolate to out-of-bounds points
        based on first and last intervals, or to return NaNs. If 'periodic',
        periodic extrapolation is used. Default is True.
    axis : int, optional
        Interpolation axis. Default is zero.
    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.
    Methods
    -------
    __call__
    extend
    derivative
    antiderivative
    integrate
    construct_fast
    from_power_basis
    from_derivatives
    See also
    --------
    PPoly : piecewise polynomials in the power basis
    Notes
    -----
    Properties of Bernstein polynomials are well documented in the literature.
    Here's a non-exhaustive list:
    .. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
    .. [2] Kenneth I. Joy, Bernstein polynomials,
       http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
    .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
           vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
    Examples
    --------
    >>> from scipy.interpolate import BPoly
    >>> x = [0, 1]
    >>> c = [[1], [2], [3]]
    >>> bp = BPoly(c, x)
    This creates a 2nd order polynomial
    .. math::
        B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
             = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
    """
    def _evaluate(self, x, nu, extrapolate, out):
        # Delegate to the C-level Bernstein evaluator; trailing dimensions
        # are flattened into one axis.
        _ppoly.evaluate_bernstein(
            self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
            self.x, x, nu, bool(extrapolate), out)
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. Default is 1, i.e. compute the
            first derivative. If negative, the antiderivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k - nu representing the derivative of
            this polynomial.
        """
        if nu < 0:
            return self.antiderivative(-nu)
        if nu > 1:
            # Higher orders: apply the first derivative repeatedly.
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp
        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            #   b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            #   B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy
            rest = (None,) * (self.c.ndim - 2)
            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None)) + rest]
            c2 = k * np.diff(self.c, axis=0) / dx
        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. Default is 1, i.e. compute
            the first integral. If negative, the derivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k + nu representing the
            antiderivative of this polynomial.
        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.
        """
        if nu <= 0:
            return self.derivative(-nu)
        if nu > 1:
            # Higher orders: apply the first antiderivative repeatedly.
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp
        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k + 1,) + c.shape[1:], dtype=c.dtype)
        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,) * (c.ndim - 2)]
        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the
        # breakpoint). Finally, use the fact that BPs form a partition of unity.
        c2[:, 1:] += np.cumsum(c2[k, :], axis=0)[:-1]
        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate
        return self.construct_fast(c2, x, extrapolate, axis=self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.
        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : {bool, 'periodic', None}, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs. If 'periodic', periodic
            extrapolation is used. If None (default), use `self.extrapolate`.
        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        ib = self.antiderivative()
        if extrapolate is None:
            extrapolate = self.extrapolate
        # ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
        if extrapolate != 'periodic':
            ib.extrapolate = extrapolate
        if extrapolate == 'periodic':
            # Split the integral into the part over period (can be several
            # of them) and the remaining part.
            # For simplicity and clarity convert to a <= b case.
            if a <= b:
                sign = 1
            else:
                a, b = b, a
                sign = -1
            xs, xe = self.x[0], self.x[-1]
            period = xe - xs
            interval = b - a
            n_periods, left = divmod(interval, period)
            res = n_periods * (ib(xe) - ib(xs))
            # Map a and b to [xs, xe].
            a = xs + (a - xs) % period
            b = a + left
            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and from xs to what is remained.
            if b <= xe:
                res += ib(b) - ib(a)
            else:
                res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
            return sign * res
        else:
            return ib(b) - ib(a)
    def extend(self, c, x, right=None):
        # Raise both coefficient arrays to a common degree before delegating
        # to the base-class extend.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)
    extend.__doc__ = _PPolyBase.extend.__doc__
    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.
        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1  # polynomial order
        rest = (None,) * (pp.c.ndim - 2)
        c = np.zeros_like(pp.c)
        for a in range(k + 1):
            factor = pp.c[a] / comb(k, k - a) * dx[(slice(None),) + rest] ** (k - a)
            for j in range(k - a, k + 1):
                c[j] += factor * comb(j, k - a)
        if extrapolate is None:
            extrapolate = pp.extrapolate
        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
    @classmethod
    def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
        """Construct a piecewise polynomial in the Bernstein basis,
        compatible with the specified values and derivatives at breakpoints.
        Parameters
        ----------
        xi : array_like
            sorted 1D array of x-coordinates
        yi : array_like or list of array_likes
            ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
        orders : None or int or array_like of ints. Default: None.
            Specifies the degree of local polynomials. If not None, some
            derivatives are ignored.
        extrapolate : bool or 'periodic', optional
            If bool, determines whether to extrapolate to out-of-bounds points
            based on first and last intervals, or to return NaNs.
            If 'periodic', periodic extrapolation is used. Default is True.
        Notes
        -----
        If ``k`` derivatives are specified at a breakpoint ``x``, the
        constructed polynomial is exactly ``k`` times continuously
        differentiable at ``x``, unless the ``order`` is provided explicitly.
        In the latter case, the smoothness of the polynomial at
        the breakpoint is controlled by the ``order``.
        Deduces the number of derivatives to match at each end
        from ``order`` and the number of derivatives available. If
        possible it uses the same number of derivatives from
        each end; if the number is odd it tries to take the
        extra one from y2. In any case if not enough derivatives
        are available at one end or another it draws enough to
        make up the total from the other end.
        If the order is too high and not enough derivatives are available,
        an exception is raised.
        Examples
        --------
        >>> from scipy.interpolate import BPoly
        >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
        Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
        such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
        >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
        Creates a piecewise polynomial `f(x)`, such that
        `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
        Based on the number of derivatives provided, the order of the
        local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
        Notice that no restriction is imposed on the derivatives at
        `x = 1` and `x = 2`.
        Indeed, the explicit form of the polynomial is::
            f(x) = | x * (1 - x),  0 <= x < 1
                   | 2 * (x - 1),  1 <= x <= 2
        So that f'(1-0) = -1 and f'(1+0) = 2
        """
        xi = np.asarray(xi)
        if len(xi) != len(yi):
            raise ValueError("xi and yi need to have the same length")
        # BUGFIX: compare *consecutive* breakpoints (xi[:-1]), not every point
        # against xi[0]; the old check let non-monotone interiors through.
        if np.any(xi[1:] - xi[:-1] <= 0):
            raise ValueError("x coordinates are not in increasing order")
        # number of intervals
        m = len(xi) - 1
        # global poly order is k-1, local orders are <=k and can vary
        try:
            k = max(len(yi[i]) + len(yi[i + 1]) for i in range(m))
        except TypeError:
            raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
        if orders is None:
            orders = [None] * m
        else:
            if isinstance(orders, (integer_types, np.integer)):
                orders = [orders] * m
            k = max(k, max(orders))
            if any(o <= 0 for o in orders):
                raise ValueError("Orders must be positive.")
        c = []
        for i in range(m):
            y1, y2 = yi[i], yi[i + 1]
            if orders[i] is None:
                n1, n2 = len(y1), len(y2)
            else:
                n = orders[i] + 1
                n1 = min(n // 2, len(y1))
                n2 = min(n - n1, len(y2))
                # BUGFIX: clip n1 against len(y1), not len(y2); otherwise a
                # short y1 paired with a long y2 could let n1 exceed the
                # number of derivatives available at the left endpoint.
                n1 = min(n - n2, len(y1))
                if n1 + n2 != n:
                    mesg = ("Point %g has %d derivatives, point %g"
                            " has %d derivatives, but order %d requested" % (
                                xi[i], len(y1), xi[i + 1], len(y2), orders[i]))
                    raise ValueError(mesg)
                if not (n1 <= len(y1) and n2 <= len(y2)):
                    raise ValueError("`order` input incompatible with"
                                     " length y1 or y2.")
            b = BPoly._construct_from_derivatives(xi[i], xi[i + 1],
                                                  y1[:n1], y2[:n2])
            if len(b) < k:
                # Pad local polynomials up to the common (global) degree.
                b = BPoly._raise_degree(b, k - len(b))
            c.append(b)
        c = np.asarray(c)
        return cls(c.swapaxes(0, 1), xi, extrapolate)
    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        r"""Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.
        Return the coefficients of a polynomial in the Bernstein basis
        defined on `[xa, xb]` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
        of the polynomial is ``na + nb - 1``.
        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
        yb : array_like
            Derivatives at ``xb``.
        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives
        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,
        .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
        If B(x) is a linear combination of the form
        .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative
        .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
        with
        .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.
        At `x = xb` it's the same with `a = n - q`.
        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('ya and yb have incompatible dimensions.')
        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating) or
                np.issubdtype(dtb, np.complexfloating)):
            dt = np.complex_
        else:
            dt = np.float_
        na, nb = len(ya), len(yb)
        n = na + nb
        c = np.empty((na + nb,) + ya.shape[1:], dtype=dt)
        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right
        for q in range(0, na):
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa) ** q
            for j in range(0, q):
                c[q] -= (-1) ** (j + q) * comb(q, j) * c[j]
        # now walk right-to-left
        for q in range(0, nb):
            c[-q - 1] = yb[q] / spec.poch(n - q, q) * (-1) ** q * (xb - xa) ** q
            for j in range(0, q):
                c[-q - 1] -= (-1) ** (j + 1) * comb(q, j + 1) * c[-q + j]
        return c
    @staticmethod
    def _raise_degree(c, d):
        r"""Raise a degree of a polynomial in the Bernstein basis.
        Given the coefficients of a polynomial degree `k`, return (the
        coefficients of) the equivalent polynomial of degree `k+d`.
        Parameters
        ----------
        c : array_like
            coefficient array, 1D
        d : integer
        Returns
        -------
        array
            coefficient array, 1D array of length `c.shape[0] + d`
        Notes
        -----
        This uses the fact that a Bernstein polynomial `b_{a, k}` can be
        identically represented as a linear combination of polynomials of
        a higher degree `k+d`:
        .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
                             comb(d, j) / comb(k+d, a+j)
        """
        if d == 0:
            return c
        k = c.shape[0] - 1
        out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
        for a in range(c.shape[0]):
            f = c[a] * comb(k, a)
            for j in range(d + 1):
                out[a + j] += f * comb(d, j) / comb(k + d, a + j)
        return out
class NdPPoly(object):
    """
    Piecewise tensor product polynomial
    The value at point `xp = (x', y', z', ...)` is evaluated by first
    computing the interval indices `i` such that::
        x[0][i[0]] <= x' < x[0][i[0]+1]
        x[1][i[1]] <= y' < x[1][i[1]+1]
        ...
    and then computing::
        S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
                * (xp[0] - x[0][i[0]])**m0
                * ...
                * (xp[n] - x[n][i[n]])**mn
                for m0 in range(k[0]+1)
                ...
                for mn in range(k[n]+1))
    where ``k[j]`` is the degree of the polynomial in dimension j. This
    representation is the piecewise multivariate power basis.
    Parameters
    ----------
    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
        Polynomial coefficients, with polynomial order `kj` and
        `mj+1` intervals for each dimension `j`.
    x : ndim-tuple of ndarrays, shapes (mj+1,)
        Polynomial breakpoints for each dimension. These must be
        sorted in increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.
    Attributes
    ----------
    x : tuple of ndarrays
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials.
    Methods
    -------
    __call__
    construct_fast
    See also
    --------
    PPoly : piecewise polynomials in 1D
    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.
    """
    def __init__(self, c, x, extrapolate=None):
        self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
        self.c = np.asarray(c)
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)
        ndim = len(self.x)
        if any(v.ndim != 1 for v in self.x):
            raise ValueError("x arrays must all be 1-dimensional")
        if any(v.size < 2 for v in self.x):
            raise ValueError("x arrays must all contain at least 2 points")
        # BUGFIX: validate the converted array ``self.c``, not the raw ``c``
        # argument, so that list/tuple input (accepted by np.asarray above)
        # does not crash on a missing .ndim/.shape attribute.
        if self.c.ndim < 2 * ndim:
            raise ValueError("c must have at least 2*len(x) dimensions")
        if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
            raise ValueError("x-coordinates are not in increasing order")
        if any(a != b.size - 1 for a, b in
               zip(self.c.shape[ndim:2 * ndim], self.x)):
            raise ValueError("x and c do not agree on the number of intervals")
        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)
    @classmethod
    def construct_fast(cls, c, x, extrapolate=None):
        """
        Construct the piecewise polynomial without making checks.
        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type. The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        self = object.__new__(cls)
        self.c = c
        self.x = x
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self
    def _get_dtype(self, dtype):
        # Promote to complex if either the requested dtype or the stored
        # coefficients are complex; otherwise work in float.
        if np.issubdtype(dtype, np.complexfloating) \
                or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_
    def _ensure_c_contiguous(self):
        # The C-level routines require contiguous coefficients and a tuple
        # of breakpoint arrays.
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
        if not isinstance(self.x, tuple):
            self.x = tuple(self.x)
    def __call__(self, x, nu=None, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative
        Parameters
        ----------
        x : array-like
            Points to evaluate the interpolant at.
        nu : tuple, optional
            Orders of derivatives to evaluate. Each must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.
        Returns
        -------
        y : array-like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)
        ndim = len(self.x)
        x = _ndim_coords_from_arrays(x)
        x_shape = x.shape
        x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
        if nu is None:
            nu = np.zeros((ndim,), dtype=np.intc)
        else:
            nu = np.asarray(nu, dtype=np.intc)
            if nu.ndim != 1 or nu.shape[0] != ndim:
                raise ValueError("invalid number of derivative orders nu")
        # Flatten: (poly orders), (intervals), (trailing values) -> 3 axes.
        dim1 = prod(self.c.shape[:ndim])
        dim2 = prod(self.c.shape[ndim:2 * ndim])
        dim3 = prod(self.c.shape[2 * ndim:])
        ks = np.array(self.c.shape[:ndim], dtype=np.intc)
        out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
                           self.x,
                           ks,
                           x,
                           nu,
                           bool(extrapolate),
                           out)
        return out.reshape(x_shape[:-1] + self.c.shape[2 * ndim:])
    def _derivative_inplace(self, nu, axis):
        """
        Compute 1D derivative along a selected dimension in-place
        May result to non-contiguous c array.
        """
        if nu < 0:
            return self._antiderivative_inplace(-nu, axis)
        ndim = len(self.x)
        axis = axis % ndim
        # reduce order
        if nu == 0:
            # noop
            return
        else:
            sl = [slice(None)] * ndim
            sl[axis] = slice(None, -nu, None)
            # BUGFIX: index with a tuple; indexing an ndarray with a plain
            # list of slices is deprecated (and an error in modern NumPy).
            c2 = self.c[tuple(sl)]
        if c2.shape[axis] == 0:
            # derivative of order 0 is zero
            shp = list(c2.shape)
            shp[axis] = 1
            c2 = np.zeros(shp, dtype=c2.dtype)
        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
        sl = [None] * c2.ndim
        sl[axis] = slice(None)
        # BUGFIX: same list-indexing issue as above — use a tuple.
        c2 *= factor[tuple(sl)]
        self.c = c2
    def _antiderivative_inplace(self, nu, axis):
        """
        Compute 1D antiderivative along a selected dimension
        May result to non-contiguous c array.
        """
        if nu <= 0:
            return self._derivative_inplace(-nu, axis)
        ndim = len(self.x)
        axis = axis % ndim
        # Bring the target order axis to the front for the 1D machinery.
        perm = list(range(ndim))
        perm[0], perm[axis] = perm[axis], perm[0]
        perm = perm + list(range(ndim, self.c.ndim))
        c = self.c.transpose(perm)
        c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
                      dtype=c.dtype)
        c2[:-nu] = c
        # divide by the correct rising factorials
        factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
        c2[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)]
        # fix continuity of added degrees of freedom
        perm2 = list(range(c2.ndim))
        perm2[1], perm2[ndim + axis] = perm2[ndim + axis], perm2[1]
        c2 = c2.transpose(perm2)
        c2 = c2.copy()
        _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
                              self.x[axis], nu - 1)
        # Undo both permutations (each is its own inverse).
        c2 = c2.transpose(perm2)
        c2 = c2.transpose(perm)
        # Done
        self.c = c2
    def derivative(self, nu):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the antiderivative is returned.
        Returns
        -------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
            representing the derivative of this polynomial.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals in each dimension are
        considered half-open, ``[a, b)``, except for the last interval
        which is closed ``[a, b]``.
        """
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
        for axis, n in enumerate(nu):
            p._derivative_inplace(n, axis)
        p._ensure_c_contiguous()
        return p
    def antiderivative(self, nu):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.
        Parameters
        ----------
        nu : ndim-tuple of int
            Order of derivatives to evaluate for each dimension.
            If negative, the derivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.
        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.
        """
        p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
        for axis, n in enumerate(nu):
            p._antiderivative_inplace(n, axis)
        p._ensure_c_contiguous()
        return p
    def integrate_1d(self, a, b, axis, extrapolate=None):
        r"""
        Compute NdPPoly representation for one dimensional definite integral
        The result is a piecewise polynomial representing the integral:
        .. math::
           p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
        where the dimension integrated over is specified with the
        `axis` parameter.
        Parameters
        ----------
        a, b : float
            Lower and upper bound for integration.
        axis : int
            Dimension over which to compute the 1D integrals
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.
        Returns
        -------
        ig : NdPPoly or array-like
            Definite integral of the piecewise polynomial over [a, b].
            If the polynomial was 1-dimensional, an array is returned,
            otherwise, an NdPPoly object.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)
        ndim = len(self.x)
        axis = int(axis) % ndim
        # reuse 1D integration routines: move the order and interval axes of
        # the selected dimension to the front, then wrap in a PPoly.
        c = self.c
        swap = list(range(c.ndim))
        swap.insert(0, swap[axis])
        del swap[axis + 1]
        swap.insert(1, swap[ndim + axis])
        del swap[ndim + axis + 1]
        c = c.transpose(swap)
        p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
                                 self.x[axis],
                                 extrapolate=extrapolate)
        out = p.integrate(a, b, extrapolate=extrapolate)
        # Construct result
        if ndim == 1:
            return out.reshape(c.shape[2:])
        else:
            c = out.reshape(c.shape[2:])
            x = self.x[:axis] + self.x[axis + 1:]
            return self.construct_fast(c, x, extrapolate=extrapolate)
    def integrate(self, ranges, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.
        Parameters
        ----------
        ranges : ndim-tuple of 2-tuples float
            Sequence of lower and upper bounds for each dimension,
            ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.
        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over
            [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
        """
        ndim = len(self.x)
        if extrapolate is None:
            extrapolate = self.extrapolate
        else:
            extrapolate = bool(extrapolate)
        if not hasattr(ranges, '__len__') or len(ranges) != ndim:
            raise ValueError("Range not a sequence of correct length")
        self._ensure_c_contiguous()
        # Reuse 1D integration routine: integrate out one dimension at a
        # time, each pass collapsing one (order, interval) axis pair.
        c = self.c
        for n, (a, b) in enumerate(ranges):
            swap = list(range(c.ndim))
            swap.insert(1, swap[ndim - n])
            del swap[ndim - n + 1]
            c = c.transpose(swap)
            p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
            out = p.integrate(a, b, extrapolate=extrapolate)
            c = out.reshape(c.shape[2:])
        return c
class RegularGridInterpolator(object):
    """
    Interpolation on a regular grid in arbitrary dimensions

    The data must be defined on a regular grid; the grid spacing however may be
    uneven. Linear and nearest-neighbour interpolation are supported. After
    setting up the interpolator object, the interpolation method (*linear* or
    *nearest*) may be chosen at each evaluation.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest". This parameter will become the default for the object's
        ``__call__`` method. Default is "linear".
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.

    Methods
    -------
    __call__

    Notes
    -----
    Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
    avoids expensive triangulation of the input data by taking advantage of the
    regular grid structure.

    .. versionadded:: 0.14

    Examples
    --------
    Evaluate a simple example function on the points of a 3D grid:

    >>> from scipy.interpolate import RegularGridInterpolator
    >>> def f(x, y, z):
    ...     return 2 * x**3 + 3 * y**2 - z
    >>> x = np.linspace(1, 4, 11)
    >>> y = np.linspace(4, 7, 22)
    >>> z = np.linspace(7, 9, 33)
    >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))

    ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
    Next, define an interpolating function from this data:

    >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)

    Evaluate the interpolating function at the two points
    ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:

    >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
    >>> my_interpolating_function(pts)
    array([ 125.80469388,  146.30069388])

    which is indeed a close approximation to
    ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    References
    ----------
    .. [1] Python package *regulargrid* by Johannes Buchner, see
           https://pypi.python.org/pypi/regulargrid/
    .. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
           Encyclopedia. Retrieved 27 Feb 2013 01:28.
           http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
    .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
           and multilinear table interpolation in many dimensions." MATH.
           COMPUT. 50.181 (1988): 189-196.
           http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error
        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)
        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))
        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            if not np.issubdtype(values.dtype, np.inexact):
                # integer data cannot hold NaN fill values or fractional
                # interpolation results
                values = values.astype(float)
        self.fill_value = fill_value
        if fill_value is not None:
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")
        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".
        """
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            # BUG FIX: format xi.shape[-1] (the value actually tested above);
            # the original used xi.shape[1], which is wrong for inputs that
            # are not 2-dimensional.
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[-1], ndim))
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])
        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not np.logical_and(np.all(self.grid[i][0] <= p),
                                      np.all(p <= self.grid[i][-1])):
                    raise ValueError("One of the requested xi is out of bounds "
                                     "in dimension %d" % i)
        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices,
                                           norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices,
                                            norm_distances,
                                            out_of_bounds)
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value
        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])

    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        """Multilinear interpolation: weighted sum over the 2**ndim corners."""
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,) * (self.values.ndim - len(indices))
        # find relevant values
        # each i and i+1 represents a edge
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                # weight is (1 - yi) for the lower edge and yi for the upper
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values

    def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
        """Nearest-neighbour interpolation; ties (yi == 0.5) pick the lower
        neighbour."""
        idx_res = []
        for i, yi in zip(indices, norm_distances):
            idx_res.append(np.where(yi <= .5, i, i + 1))
        # BUG FIX: index with a tuple; indexing an ndarray with a list of
        # index arrays is deprecated/removed fancy-indexing in modern NumPy.
        return self.values[tuple(idx_res)]

    def _find_indices(self, xi):
        """Locate, per dimension, the grid interval containing each point.

        ``xi`` has shape (ndim, npoints).  Returns per-dimension interval
        indices, normalized in-interval distances, and an out-of-bounds mask.
        """
        # find relevant edges between which xi are situated
        indices = []
        # compute distance to lower edge in unity units
        norm_distances = []
        # check for out of bounds xi
        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
        # iterate through dimensions
        for x, grid in zip(xi, self.grid):
            i = np.searchsorted(grid, x) - 1
            # clip so out-of-range points reuse the first/last interval
            # (this is what makes extrapolation work)
            i[i < 0] = 0
            i[i > grid.size - 2] = grid.size - 2
            indices.append(i)
            norm_distances.append((x - grid[i]) /
                                  (grid[i + 1] - grid[i]))
            if not self.bounds_error:
                out_of_bounds += x < grid[0]
                out_of_bounds += x > grid[-1]
        return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated. Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----
    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions
    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions
    RectBivariateSpline : Bivariate spline approximation over a rectangular
                          mesh
    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)
    if not hasattr(values, 'ndim'):
        values = np.asarray(values)
    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method spline2fd does not support extrapolation.")
    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")
    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])
    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # BUG FIX: format xi.shape[-1] (the value actually tested above); the
        # original used xi.shape[1], which is wrong for non-2-D xi arrays.
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))
    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)
    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])
        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])
        # make a copy of values for RectBivariateSpline
        # NOTE(review): values[:] is a *view*, not a copy, for ndarrays --
        # confirm RectBivariateSpline does not mutate its input.
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value
        return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
    """
    Deprecated piecewise polynomial class.

    New code should use the `PPoly` class instead.
    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        warnings.warn("ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)
        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)
        PPoly.__init__(self, coeffs, breaks)
        # Legacy attribute aliases kept for backward compatibility with the
        # pre-PPoly interface.
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]
        self.fill = fill
        self.a = self.breaks[0]
        self.b = self.breaks[-1]

    def __call__(self, x):
        # nu=0 (no derivative), extrapolate=False; out-of-domain handling is
        # done in _evaluate via the fill value.
        return PPoly.__call__(self, x, 0, False)

    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # Overwrite out-of-domain points with the configured fill value.
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out

    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        """Build a ppform from knots `xk`, spline coefficients `cvals` and
        spline `order` by evaluating scaled derivatives at the left knots."""
        # Note: this spline representation is incompatible with FITPACK
        N = len(xk) - 1
        sivals = np.empty((order + 1, N), dtype=float)
        # NOTE(review): `xrange` must be provided by a py2/py3 compat import
        # elsewhere in this file -- confirm before running on Python 3.
        for m in xrange(order, -1, -1):
            # Taylor coefficient: m-th derivative divided by m!.
            fact = spec.gamma(m + 1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order - m, :] = res
        return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
    """Solve for spline coefficients minimizing ``norm(J*c, 2)`` subject to
    the interpolation constraint ``B*c = yk``.

    `conds` is accepted for interface compatibility with the other
    ``_find_*`` solvers but is ignored here.
    """
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk) - 1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    # NOTE(review): the SVD splits coefficient space; V2 appears to span the
    # directions unconstrained by B, over which smoothness is minimized.
    u, s, vh = scipy.linalg.svd(B)
    ind = K - 1
    V2 = vh[-ind:, :].T
    V1 = vh[:-ind, :].T
    A = dot(J.T, J)
    tmp = dot(V2.T, A)
    Q = dot(tmp, V2)
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2, p)
    tmp = np.eye(N + K) - tmp
    tmp = dot(tmp, V1)
    tmp = dot(tmp, np.diag(1.0 / s))
    tmp = dot(tmp, u.T)
    # Contract with yk (may be N-d: one curve per trailing index).
    return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
              "use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.
    """
    yk = np.asanyarray(yk)
    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    if order == 0:
        # Piecewise constant: one value per interval.
        return xk, yk[:-1], order
    elif order == 1:
        # Piecewise linear: the data points themselves are the coefficients.
        return xk, yk, order
    # Look the solver up by name instead of the previous
    # eval('_find_%s' % kind): eval would execute arbitrary expressions
    # embedded in `kind`, and its bare `except:` hid unrelated errors.
    try:
        func = globals()['_find_%s' % kind]
    except KeyError:
        raise NotImplementedError("unknown kind: %r" % (kind,))
    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
              "use BSpline instead.")
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points. The approximation
    region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Curvature
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Deriv

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.
    """
    (xj, cvals, k) = xck
    oldshape = np.shape(xnew)
    xx = np.ravel(xnew)
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    # Evaluate each trailing-dimension curve separately; complex coefficients
    # are split into real/imag parts because _bspleval works on real data.
    for index in np.ndindex(*sh):
        sl = (slice(None),) + index
        if issubclass(cvals.dtype.type, np.complexfloating):
            res[sl].real = _fitpack._bspleval(xx, xj, cvals.real[sl], k, deriv)
            res[sl].imag = _fitpack._bspleval(xx, xj, cvals.imag[sl], k, deriv)
        else:
            res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
    # Restore the caller's xnew shape in front of the per-curve dimensions.
    res.shape = oldshape + sh
    return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
              "use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
    """Convert a fixed-spline tuple ``(xk, cvals, k)`` into a `ppform`.

    Thin wrapper around `ppform.fromspline`.
    """
    pp = ppform.fromspline(xk, cvals, k)
    return pp
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
              "use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Convenience wrapper: builds the spline representation with `splmake`
    and evaluates it at `xnew` with `spleval`.

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Order of the spline fit. Default is 3.
    kind : string
        Fit kind passed through to `splmake`, e.g. 'smoothest'.
    conds : optional
        Additional conditions passed through to `splmake`.

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.
    """
    representation = splmake(xk, yk, order=order, kind=kind, conds=conds)
    return spleval(representation, xnew)
| {
"content_hash": "7127c173dd087143a7ec24015d99bef7",
"timestamp": "",
"source": "github",
"line_count": 2856,
"max_line_length": 111,
"avg_line_length": 35.3921568627451,
"alnum_prop": 0.5476948951325683,
"repo_name": "DailyActie/Surrogate-Model",
"id": "f21172c26e5961a3f26369c065257788a5bebaea",
"size": "101080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scipy-master/scipy/interpolate/interpolate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
import urllib
import re
import smtplib
import string
from xml.dom import minidom
from email.mime.text import MIMEText
# Yahoo! Weather RSS endpoint; %s is filled in with a US ZIP code.
WEATHER_URL = 'http://xml.weather.yahoo.com/forecastrss?p=%s'
# XML namespace of the Yahoo! Weather elements in the feed.
WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
# Yahoo! condition codes treated as "rain" (trigger a notification).
RAIN_CODES = (5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 35, 40, 45)
USERS_URL = 'http://www.dailyraincheck.com/users.php?password=redacted'  # yes, it's not secure, but this is a quick hack
# SMTP relay and sender address for the notification mail.
HOST = 'outgoing.mit.edu'
FROM = 'landa@mit.edu'
def weather_for_zip(zip_code):
    """Fetch the Yahoo! Weather RSS feed for *zip_code* and summarize it.

    Returns a dict with the current condition/temperature, the feed title,
    and one dict per forecast entry (date, low, high, condition, code) --
    all values are the raw attribute strings from the feed.
    """
    feed = minidom.parse(urllib.urlopen(WEATHER_URL % zip_code))
    forecasts = [
        {
            'date': node.getAttribute('date'),
            'low': node.getAttribute('low'),
            'high': node.getAttribute('high'),
            'condition': node.getAttribute('text'),
            'code': node.getAttribute('code'),
        }
        for node in feed.getElementsByTagNameNS(WEATHER_NS, 'forecast')
    ]
    current = feed.getElementsByTagNameNS(WEATHER_NS, 'condition')[0]
    return {
        'current_condition': current.getAttribute('text'),
        'current_temp': current.getAttribute('temp'),
        'forecasts': forecasts,
        'title': feed.getElementsByTagName('title')[0].firstChild.data
    }
def validateEmail(email):
    """Return True when *email* is longer than 7 characters and matches the
    address pattern (user@domain with a 2-3 letter or numeric TLD)."""
    if len(email) <= 7:
        return False
    return re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) is not None
def validatePhone(phone):
    """Return True when *phone* consists of exactly 11 digits."""
    # BUG FIX: the original pattern was unanchored at the end, so any string
    # *starting* with 11 digits (e.g. 12 digits, or digits plus trailing
    # garbage) validated.  \Z anchors the match at the end of the string
    # (re.match already anchors the start).
    return re.match("\\d{11}\\Z", phone) is not None
def validateZipcode(zipcode):
    """Return True when *zipcode* consists of exactly 5 digits."""
    # BUG FIX: the original pattern was unanchored at the end, so strings
    # like "123456" or "12345abc" validated.  \Z anchors the match at the
    # end of the string (re.match already anchors the start).
    return re.match("\\d{5}\\Z", zipcode) is not None
# --- main script (Python 2) ---
# Fetch the registered users, then for each user with a valid zipcode look
# up today's forecast and email a notice when rain is expected.
users = []
user_list = urllib.urlopen(USERS_URL)
for line in user_list.read().splitlines():
    # each line is "zipcode,email,phone"
    users.append(line.split(','))
server = smtplib.SMTP(HOST)
for user in users:
    zipcode, email, phone = user
    if not validateZipcode(zipcode): continue
    weather = weather_for_zip(zipcode)
    # only the first forecast entry (today) is considered
    forecast = weather["forecasts"][0]
    if int(forecast["code"]) in RAIN_CODES:
        message = forecast["condition"] + "; hi=" + forecast["high"] + ", lo=" + forecast["low"] + "."
        print message
        if validateEmail(email):
            print "Sending email to", email
            # raw SMTP payload: the first line doubles as the Subject header
            body = "Subject: %s\nBrought to you by DailyRainCheck.com" % message
            server.sendmail(FROM, [email], body)
    # NOTE(review): original indentation was lost; this blank-line print is
    # placed at per-user level but may have belonged inside the rain branch.
    print ""
server.quit()
| {
"content_hash": "fd1e809e07e2dcf951a92f53ad7fcb8a",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 120,
"avg_line_length": 32.04225352112676,
"alnum_prop": 0.6558241758241758,
"repo_name": "landa/rain",
"id": "364f9a281cc96c9ef18e6fcac5a8ed729d17ac8d",
"size": "2294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather/weather.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "579"
},
{
"name": "PHP",
"bytes": "9021"
},
{
"name": "Python",
"bytes": "2294"
}
],
"symlink_target": ""
} |
"""Shared functionality for dtFabric-based data format plist parser plugins."""
import abc
import os
from dtfabric import errors as dtfabric_errors
from dtfabric.runtime import fabric as dtfabric_fabric
from plaso.lib import errors
from plaso.parsers.plist_plugins import interface
class DtFabricBasePlistPlugin(interface.PlistPlugin):
  """Shared functionality for dtFabric-based data format plist plugins.

  A dtFabric-based plist parser plugin describes its data format structures
  in a dtFabric definition file, for example "dtfabric.yaml":

  name: int32
  type: integer
  description: 32-bit signed integer type
  attributes:
    format: signed
    size: 4
    units: bytes
  ---
  name: point3d
  aliases: [POINT]
  type: structure
  description: Point in 3 dimensional space.
  attributes:
    byte_order: little-endian
  members:
  - name: x
    aliases: [XCOORD]
    data_type: int32
  - name: y
    data_type: int32
  - name: z
    data_type: int32

  The path of the definition file is set in the class constant
  "_DEFINITION_FILE" and is read on class instantiation.  The definition
  file contains data type definitions such as "int32" and "point3d" above.

  A data type map maps a data type definition onto a byte stream to create
  a Python object; for the byte stream
  01 00 00 00 02 00 00 00 03 00 00 00 the corresponding "point3d" object
  would be point3d(x=1, y=2, z=3).

  A subclass needs to:
  * define a definition file and override _DEFINITION_FILE;
  * implement the ParseFileObject method.

  Use _GetDataTypeMap to obtain (cached) data type maps from the "fabric",
  the collection of data type definitions read from the definition file,
  and _ReadStructureFromByteStream to map a byte stream onto a structure
  values object.
  """

  # The dtFabric definition file, which must be overwritten by a subclass.
  _DEFINITION_FILE = None

  # Preserve the absolute path value of __file__ in case it is changed
  # at run-time.
  _DEFINITION_FILES_PATH = os.path.dirname(__file__)

  def __init__(self):
    """Initializes a dtFabric-based data format plist parser plugin."""
    super(DtFabricBasePlistPlugin, self).__init__()
    self._data_type_maps = {}
    self._fabric = self._ReadDefinitionFile(self._DEFINITION_FILE)

  def _GetDataTypeMap(self, name):
    """Retrieves a data type map defined by the definition file.

    Data type maps are cached for reuse.

    Args:
      name (str): name of the data type as defined by the definition file.

    Returns:
      dtfabric.DataTypeMap: data type map, which contains a data type
          definition, such as a structure, that can be mapped onto binary
          data.
    """
    cached_map = self._data_type_maps.get(name, None)
    if not cached_map:
      cached_map = self._fabric.CreateDataTypeMap(name)
      self._data_type_maps[name] = cached_map
    return cached_map

  def _ReadDefinitionFile(self, filename):
    """Reads a dtFabric definition file.

    Args:
      filename (str): name of the dtFabric definition file.

    Returns:
      dtfabric.DataTypeFabric: data type fabric, which contains the data
          format data type maps of the data type definitions, or None if no
          filename is provided.
    """
    if not filename:
      return None
    definition_path = os.path.join(self._DEFINITION_FILES_PATH, filename)
    with open(definition_path, 'rb') as definition_file:
      yaml_definition = definition_file.read()
    return dtfabric_fabric.DataTypeFabric(yaml_definition=yaml_definition)

  def _ReadStructureFromByteStream(
      self, byte_stream, file_offset, data_type_map, context=None):
    """Reads a structure from a byte stream.

    Args:
      byte_stream (bytes): byte stream.
      file_offset (int): offset of the structure data relative to the start
          of the file-like object.
      data_type_map (dtfabric.DataTypeMap): data type map of the structure.
      context (Optional[dtfabric.DataTypeMapContext]): data type map context,
          used by dtFabric to hold state about the mapping, for example the
          size of variable size data type definitions.

    Returns:
      object: structure values object.

    Raises:
      ParseError: if the structure cannot be read.
      ValueError: if file-like object or data type map is missing.
    """
    if not byte_stream:
      raise ValueError('Missing byte stream.')
    if not data_type_map:
      raise ValueError('Missing data type map.')

    try:
      return data_type_map.MapByteStream(byte_stream, context=context)
    except (dtfabric_errors.ByteStreamTooSmallError,
            dtfabric_errors.MappingError) as exception:
      error_message = (
          'Unable to map {0:s} data at offset: 0x{1:08x} with error: '
          '{2!s}').format(data_type_map.name or '', file_offset, exception)
      raise errors.ParseError(error_message)

  @abc.abstractmethod
  def GetEntries(
      self, parser_mediator, match=None, top_level=None, **unused_kwargs):
    """Extracts event objects from the values of entries within a plist.

    This is the main method that a plist plugin needs to implement.

    The contents of the plist keys defined in PLIST_KEYS are made available
    to the plugin as self.matched{'KEY': 'value'}; the plugin implements the
    logic to turn them into events for the Plaso timeline.

    For example, to record the LastInquiryUpdate timestamps of devices in
    the Bluetooth configuration file 'com.apple.bluetooth' (under the key
    'DeviceCache') a plugin defines:

      PLIST_PATH_FILTERS = frozenset([
          interface.PlistPathFilter('com.apple.bluetooth')])
      PLIST_KEYS = frozenset(['DeviceCache']).

    When a matching file is processed, self.matched is populated, for
    example {'DeviceCache': [{'DE:AD:BE:EF:01': {'LastInquiryUpdate':
    DateTime_Object}, ...}]}, and GetEntries() extracts, formats, and
    produces the values as a event.PlistEvent.

    The attributes for a PlistEvent should include the following:
      root = Root key this event was extracted from. E.g. DeviceCache/
      key = Key the value resided in. E.g. 'DE:AD:BE:EF:01'
      time = Date this artifact was created in number of micro seconds
             (usec) since January 1, 1970, 00:00:00 UTC.
      desc = Short description. E.g. 'Device LastInquiryUpdated'

    See plist/bluetooth.py for the implemented example plugin.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
      top_level (Optional[dict[str, object]]): plist top-level item.
    """
| {
"content_hash": "759a71381195a91ed3910d545189ee03",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 80,
"avg_line_length": 38.04123711340206,
"alnum_prop": 0.7069105691056911,
"repo_name": "kiddinn/plaso",
"id": "30335e6a6ece9bf9ce100cad0ebd4975a594bb0e",
"size": "7404",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plaso/parsers/plist_plugins/dtfabric_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
} |
import copy
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
from topaz.celldict import CellDict, VersionTag
from topaz.coerce import Coerce
from topaz.module import ClassDef, check_frozen
from topaz.objects.functionobject import W_FunctionObject
from topaz.objects.objectobject import W_RootObject
from topaz.objects.procobject import W_ProcObject
from topaz.scope import StaticScope
class AttributeReader(W_FunctionObject):
    """Synthetic method backing an `attr_reader`: returns the value of the
    instance variable it was created for, or nil when the lookup yields
    nothing."""
    _immutable_fields_ = ["varname"]

    def __init__(self, varname):
        W_FunctionObject.__init__(self, varname)
        self.varname = varname

    def __deepcopy__(self, memo):
        # BUG FIX: the original started the super chain at W_FunctionObject,
        # i.e. super(W_FunctionObject, self), which *skips*
        # W_FunctionObject's own __deepcopy__ and any state it copies.
        # Start from this class so the full MRO runs.
        obj = super(AttributeReader, self).__deepcopy__(memo)
        obj.varname = self.varname
        return obj

    def call(self, space, w_obj, args_w, block):
        # Fall back to nil when the lookup returns no wrapped value.
        return space.find_instance_var(w_obj, self.varname) or space.w_nil
class AttributeWriter(W_FunctionObject):
    """Synthetic method backing an `attr_writer`: stores its single argument
    into the instance variable it was created for and returns the value."""
    _immutable_fields_ = ["varname"]

    def __init__(self, varname):
        W_FunctionObject.__init__(self, varname)
        self.varname = varname

    def __deepcopy__(self, memo):
        # BUG FIX: the original started the super chain at W_FunctionObject,
        # i.e. super(W_FunctionObject, self), which *skips*
        # W_FunctionObject's own __deepcopy__ and any state it copies.
        # Start from this class so the full MRO runs.
        obj = super(AttributeWriter, self).__deepcopy__(memo)
        obj.varname = self.varname
        return obj

    def call(self, space, w_obj, args_w, block):
        # Exactly one argument is expected (enforced by the unpacking).
        [w_value] = args_w
        space.set_instance_var(w_obj, self.varname, w_value)
        return w_value

    def arity(self, space):
        return space.newint(1)
class UndefMethod(W_FunctionObject):
    # Placeholder installed when a method is undefined: any call is routed
    # to method_missing with the original method name as the first argument.
    _immutable_fields_ = ["name"]

    def __init__(self, name):
        W_FunctionObject.__init__(self, name)
        self.name = name

    def call(self, space, w_obj, args_w, block):
        # NOTE(review): mutates the caller's args_w list in place before
        # forwarding -- confirm callers do not reuse the list afterwards.
        args_w.insert(0, space.newsymbol(self.name))
        return space.send(w_obj, "method_missing", args_w, block)
class DefineMethodBlock(W_FunctionObject):
    # Method object created by `define_method` when given a block: each call
    # re-binds the block to the receiver and invokes it with lambda
    # semantics.
    _immutable_fields_ = ["name", "block"]

    def __init__(self, name, block):
        W_FunctionObject.__init__(self, name)
        self.name = name
        self.block = block

    def call(self, space, w_obj, args_w, block):
        from topaz.interpreter import RaiseReturn
        # is_lambda=True so `return` inside the block behaves like a method
        # return rather than returning from the enclosing scope.
        method_block = self.block.copy(space, w_self=w_obj, is_lambda=True)
        try:
            return space.invoke_block(method_block, args_w, block)
        except RaiseReturn as e:
            # A `return` from the defined method yields its value.
            return e.w_value

    def arity(self, space):
        return space.newint(self.block.bytecode.arity(negative_defaults=True))
class DefineMethodMethod(W_FunctionObject):
    """Method object created by `define_method` when given an UnboundMethod:
    each call binds the unbound method to the receiver and invokes it."""
    _immutable_fields_ = ["name", "w_unbound_method"]

    def __init__(self, name, w_unbound_method):
        W_FunctionObject.__init__(self, name)
        self.name = name
        self.w_unbound_method = w_unbound_method

    def call(self, space, w_obj, args_w, block):
        w_bound = space.send(self.w_unbound_method, "bind", [w_obj])
        return space.send(w_bound, "call", args_w, block)
class W_Autoload(W_RootObject):
    """Placeholder recorded for `autoload`: remembers which file to require
    when the associated constant is first resolved."""

    def __init__(self, space, path):
        self.space = space
        self.path = path

    def load(self):
        # Requiring the stored path is expected to define the constant.
        w_path = self.space.newstr_fromstr(self.path)
        self.space.send(self.space.w_kernel, "require", [w_path])
class W_ModuleObject(W_RootObject):
    """Implementation of Ruby's ``Module``.

    Owns the method table (``methods_w``), constant table
    (``constants_w``), class variables, module-level instance variables
    and the list of included modules.  ``version`` is a JIT version tag:
    every structural change goes through ``mutated()``, which replaces
    the tag so that the ``@jit.elidable`` lookups
    (``_find_method_pure`` / ``_find_const_pure``) stay sound.

    Review fixes applied (see per-method notes):
      * ``find_local_const`` checked the wrong variable when deciding
        whether to restore an autoload placeholder after a failed load;
      * ``lexical_constants`` asserted on the scope object instead of
        the module it wraps;
      * the Python name for ``autoload?`` shadowed the one for
        ``autoload`` — renamed to ``method_autoloadp`` per the file's
        ``*p``-for-``?`` convention.
    """
    _immutable_fields_ = ["version?", "included_modules?[*]", "klass?", "name?"]
    classdef = ClassDef("Module", W_RootObject.classdef)
    def __init__(self, space, name, klass=None):
        self.name = name
        self.klass = klass
        self.version = VersionTag()
        self.methods_w = {}
        self.constants_w = {}
        self.class_variables = CellDict()
        self.instance_variables = CellDict()
        self.flags = CellDict()
        self.included_modules = []
        self.descendants = []
    def __deepcopy__(self, memo):
        obj = super(W_ModuleObject, self).__deepcopy__(memo)
        obj.name = self.name
        obj.klass = copy.deepcopy(self.klass, memo)
        obj.version = copy.deepcopy(self.version, memo)
        obj.methods_w = copy.deepcopy(self.methods_w, memo)
        obj.constants_w = copy.deepcopy(self.constants_w, memo)
        obj.class_variables = copy.deepcopy(self.class_variables, memo)
        obj.instance_variables = copy.deepcopy(self.instance_variables, memo)
        obj.flags = copy.deepcopy(self.flags, memo)
        obj.included_modules = copy.deepcopy(self.included_modules, memo)
        obj.descendants = copy.deepcopy(self.descendants, memo)
        return obj
    def getclass(self, space):
        if self.klass is not None:
            # promote() lets the JIT constant-fold the class pointer.
            return jit.promote(self).klass
        return W_RootObject.getclass(self, space)
    def getsingletonclass(self, space):
        # Lazily create the singleton (metaclass) on first request.
        if self.klass is None or not self.klass.is_singleton:
            self.klass = space.newclass(
                "#<Class:%s>" % self.name, self.klass or space.w_module, is_singleton=True, attached=self
            )
        return self.klass
    def mutated(self):
        # Invalidate all elidable lookups cached against the old tag.
        self.version = VersionTag()
    def define_method(self, space, name, method):
        # initialize/initialize_copy and module_function methods are
        # private by Ruby convention.
        if (name == "initialize" or name == "initialize_copy" or
            method.visibility == W_FunctionObject.MODULE_FUNCTION):
            method.update_visibility(W_FunctionObject.PRIVATE)
        self.mutated()
        self.methods_w[name] = method
        if not space.bootstrap:
            if isinstance(method, UndefMethod):
                self.method_undefined(space, space.newsymbol(name))
            else:
                self.method_added(space, space.newsymbol(name))
    @jit.unroll_safe
    def find_method(self, space, name):
        # Own table first, then included modules in insertion order.
        method = self._find_method_pure(space, name, self.version)
        if method is None:
            for module in self.included_modules:
                method = module.find_method(space, name)
                if method is not None:
                    return method
        return method
    @jit.unroll_safe
    def find_method_super(self, space, name):
        # Like find_method but skipping this module's own table (super).
        for module in self.included_modules:
            method = module.find_method(space, name)
            if method is not None:
                return method
        return None
    @jit.elidable
    def _find_method_pure(self, space, method, version):
        # Elidable: result is constant for a given (name, version) pair.
        return self.methods_w.get(method, None)
    @specialize.argtype(2)
    def methods(self, space, visibility=None, inherit=True):
        # Collect method names, optionally filtered by visibility and
        # optionally including those of included modules, minus any that
        # this module has undef'd.
        methods = {}
        for name, method in self.methods_w.iteritems():
            if (not isinstance(method, UndefMethod) and
                (visibility is None or method.visibility == visibility)):
                methods[name] = None
        if inherit:
            for w_mod in self.included_modules:
                for name in w_mod.methods(space, visibility=visibility):
                    method = self._find_method_pure(space, name, self.version)
                    if method is None or not isinstance(method, UndefMethod):
                        methods[name] = None
        return methods.keys()
    def set_const(self, space, name, w_obj):
        self.mutated()
        self.constants_w[name] = w_obj
        # Assigning an anonymous module to a named scope names it
        # (e.g. `Foo::Bar = Module.new`).
        if isinstance(w_obj, W_ModuleObject) and w_obj.name is None and self.name is not None:
            w_obj.set_name_in_scope(space, name, self)
    def find_const(self, space, name, autoload=True):
        # Own + included modules, then fall back to Object's constants.
        w_res = self.find_included_const(space, name, autoload=autoload)
        if w_res is None:
            return space.w_object.find_const(space, name, autoload=autoload)
        else:
            return w_res
    @jit.unroll_safe
    def find_included_const(self, space, name, autoload=True):
        w_res = self.find_local_const(space, name, autoload=autoload)
        if w_res is None:
            for w_mod in self.included_modules:
                w_res = w_mod.find_local_const(space, name, autoload=autoload)
                if w_res is not None:
                    break
        return w_res
    def included_constants(self, space):
        consts = {}
        for const in self.constants_w:
            consts[const] = None
        for w_mod in self.included_modules:
            for const in w_mod.included_constants(space):
                consts[const] = None
        return consts.keys()
    def lexical_constants(self, space):
        # Constants visible along the lexical scope chain of the
        # innermost Ruby frame (Module.nesting order).
        consts = {}
        frame = space.getexecutioncontext().gettoprubyframe()
        scope = frame.lexical_scope
        while scope is not None:
            # BUG FIX: the assert used to be on `scope` itself, but
            # `scope` is a lexical-scope object wrapping the module (cf.
            # singleton_method_nesting below) — assert on scope.w_mod.
            w_mod = scope.w_mod
            assert isinstance(w_mod, W_ModuleObject)
            for const in w_mod.constants_w:
                consts[const] = None
            scope = scope.backscope
        return consts.keys()
    def local_constants(self, space):
        return self.constants_w.keys()
    def inherited_constants(self, space):
        # Plain modules have no superclass chain; classes override this.
        return self.local_constants(space)
    def find_local_const(self, space, name, autoload=True):
        w_res = self._find_const_pure(name, self.version)
        if autoload and isinstance(w_res, W_Autoload):
            # Blank the slot so a recursive lookup during the require
            # does not re-trigger the autoload.
            self.constants_w[name] = None
            try:
                w_res.load()
            finally:
                w_new_res = self.constants_w.get(name, None)
                # BUG FIX: previously tested `not w_res` — the always-
                # truthy W_Autoload — so a require that failed to define
                # the constant never restored the autoload placeholder.
                if not w_new_res:
                    self.constants_w[name] = w_res
                w_res = w_new_res
            return w_res
        else:
            return w_res
    @jit.elidable
    def _find_const_pure(self, name, version):
        # Elidable: result is constant for a given (name, version) pair.
        return self.constants_w.get(name, None)
    @jit.unroll_safe
    def set_class_var(self, space, name, w_obj):
        # Write into the outermost ancestor that already defines the
        # variable (or self), and drop shadowing copies in descendants.
        for module in reversed(self.ancestors()):
            assert isinstance(module, W_ModuleObject)
            w_res = module.class_variables.get(space, name)
            if w_res is not None or module is self:
                module.class_variables.set(space, name, w_obj)
                if module is self:
                    for descendant in self.descendants:
                        descendant.remove_class_var(space, name)
    @jit.unroll_safe
    def find_class_var(self, space, name):
        w_res = self.class_variables.get(space, name)
        if w_res is None:
            # Skip index 0 (self, already checked above).
            ancestors = self.ancestors()
            for idx in xrange(1, len(ancestors)):
                module = ancestors[idx]
                assert isinstance(module, W_ModuleObject)
                w_res = module.class_variables.get(space, name)
                if w_res is not None:
                    break
        return w_res
    @jit.unroll_safe
    def remove_class_var(self, space, name):
        self.class_variables.delete(name)
        for descendant in self.descendants:
            descendant.remove_class_var(space, name)
    def set_instance_var(self, space, name, w_value):
        return self.instance_variables.set(space, name, w_value)
    def find_instance_var(self, space, name):
        return self.instance_variables.get(space, name)
    def copy_instance_vars(self, space, w_other):
        # Used by initialize_copy: pull every ivar of w_other over.
        assert isinstance(w_other, W_ModuleObject)
        for key in w_other.instance_variables:
            w_value = w_other.instance_variables.get(space, key)
            self.set_instance_var(space, key, w_value)
    def set_flag(self, space, name):
        self.flags.set(space, name, space.w_true)
    def unset_flag(self, space, name):
        self.flags.set(space, name, space.w_false)
    def get_flag(self, space, name):
        # Unset flags read as false, never nil.
        return self.flags.get(space, name) or space.w_false
    def ancestors(self, include_singleton=True, include_self=True):
        # include_singleton is only meaningful for classes; plain
        # modules accept it for interface compatibility.
        if include_self:
            return [self] + self.included_modules
        else:
            return self.included_modules[:]
    @jit.unroll_safe
    def is_ancestor_of(self, w_cls):
        if self is w_cls:
            return True
        for w_mod in w_cls.included_modules:
            if self is w_mod:
                return True
        if w_cls.superclass is not None:
            return self.is_ancestor_of(w_cls.superclass)
        return False
    def include_module(self, space, w_mod):
        assert isinstance(w_mod, W_ModuleObject)
        if w_mod not in self.ancestors():
            self.included_modules = [w_mod] + self.included_modules
            w_mod.included(space, self)
    def included(self, space, w_mod):
        self.descendants.append(w_mod)
        if space.respond_to(self, "included"):
            space.send(self, "included", [w_mod])
    def extend_object(self, space, w_mod):
        if self not in w_mod.ancestors():
            self.descendants.append(w_mod)
            w_mod.included_modules = [self] + w_mod.included_modules
    def set_visibility(self, space, names_w, visibility):
        # With explicit names: change those methods.  Without names:
        # change the default visibility of the current frame (the bare
        # `private` / `public` / `protected` statement form).
        names = [space.symbol_w(w_name) for w_name in names_w]
        if names:
            for name in names:
                self.set_method_visibility(space, name, visibility)
        else:
            self.set_default_visibility(space, visibility)
    def set_default_visibility(self, space, visibility):
        frame = space.getexecutioncontext().gettoprubyframe()
        frame.visibility = visibility
    def set_method_visibility(self, space, name, visibility):
        w_method = self.find_method(space, name)
        if w_method is None or isinstance(w_method, UndefMethod):
            # Fall back to Object's methods before giving up.
            w_method = space.w_object.find_method(space, name)
            if w_method is None or isinstance(w_method, UndefMethod):
                cls_name = space.obj_to_s(self)
                raise space.error(space.w_NameError,
                    "undefined method `%s' for class `%s'" % (name, cls_name)
                )
        w_method.update_visibility(visibility)
    def method_added(self, space, w_name):
        space.send(self, "method_added", [w_name])
    def method_undefined(self, space, w_name):
        space.send(self, "method_undefined", [w_name])
    def method_removed(self, space, w_name):
        space.send(self, "method_removed", [w_name])
    def set_name_in_scope(self, space, name, w_scope):
        self.name = space.buildname(name, w_scope)
        # Naming a module also names any anonymous modules stored in it.
        for name, w_const in self.constants_w.iteritems():
            if isinstance(w_const, W_ModuleObject):
                w_const.set_name_in_scope(space, name, self)
    @classdef.singleton_method("nesting")
    def singleton_method_nesting(self, space):
        frame = space.getexecutioncontext().gettoprubyframe()
        modules_w = []
        scope = frame.lexical_scope
        while scope is not None:
            modules_w.append(scope.w_mod)
            scope = scope.backscope
        return space.newarray(modules_w)
    @classdef.singleton_method("allocate")
    def method_allocate(self, space):
        return W_ModuleObject(space, None, self)
    @classdef.method("initialize")
    def method_initialize(self, space, block):
        if block is not None:
            space.send(self, "module_exec", [self], block)
    @classdef.method("to_s")
    def method_to_s(self, space):
        name = self.name
        if name is None:
            # Anonymous module: default #<Module:0x...> representation.
            return space.newstr_fromstr(space.any_to_s(self))
        return space.newstr_fromstr(name)
    @classdef.method("include")
    def method_include(self, space, args_w):
        # Validate all arguments before mutating anything.
        for w_mod in args_w:
            if type(w_mod) is not W_ModuleObject:
                raise space.error(
                    space.w_TypeError,
                    "wrong argument type %s (expected Module)" % space.obj_to_s(space.getclass(w_mod))
                )
        # Reversed so the first argument ends up earliest in the chain.
        for w_mod in reversed(args_w):
            space.send(w_mod, "append_features", [self])
        return self
    @classdef.method("include?")
    def method_includep(self, space, w_mod):
        if type(w_mod) is not W_ModuleObject:
            raise space.error(
                space.w_TypeError,
                "wrong argument type %s (expected Module)" % space.obj_to_s(space.getclass(w_mod))
            )
        # A module does not include itself.
        if w_mod is self:
            return space.w_false
        return space.newbool(w_mod in self.ancestors())
    @classdef.method("append_features")
    def method_append_features(self, space, w_mod):
        if w_mod in self.ancestors():
            raise space.error(space.w_ArgumentError, "cyclic include detected")
        if type(self) is not W_ModuleObject:
            raise space.error(space.w_TypeError, "wrong argument type")
        for module in reversed(self.ancestors()):
            w_mod.include_module(space, module)
    @classdef.method("define_method", name="symbol")
    @check_frozen()
    def method_define_method(self, space, name, w_method=None, block=None):
        if w_method is not None:
            if space.is_kind_of(w_method, space.w_method):
                # Method objects are unbound first, then handled below.
                w_method = space.send(w_method, "unbind")
            if space.is_kind_of(w_method, space.w_unbound_method):
                self.define_method(space, name, DefineMethodMethod(name, w_method))
                return w_method
            elif space.is_kind_of(w_method, space.w_proc):
                assert isinstance(w_method, W_ProcObject)
                self.define_method(space, name, DefineMethodBlock(name, w_method))
                return w_method.copy(space, is_lambda=True)
            else:
                raise space.error(space.w_TypeError,
                    "wrong argument type %s (expected Proc/Method)" % space.obj_to_s(space.getclass(w_method))
                )
        elif block is not None:
            self.define_method(space, name, DefineMethodBlock(name, block))
            return block.copy(space, is_lambda=True)
        else:
            raise space.error(space.w_ArgumentError, "tried to create Proc object without a block")
    @classdef.method("attr_accessor")
    def method_attr_accessor(self, space, args_w):
        self.method_attr_reader(space, args_w)
        self.method_attr_writer(space, args_w)
    @classdef.method("attr_reader")
    def method_attr_reader(self, space, args_w):
        for w_arg in args_w:
            varname = Coerce.symbol(space, w_arg)
            self.define_method(space, varname, AttributeReader("@" + varname))
    @classdef.method("attr_writer")
    def method_attr_writer(self, space, args_w):
        for w_arg in args_w:
            varname = Coerce.symbol(space, w_arg)
            self.define_method(space, varname + "=", AttributeWriter("@" + varname))
    @classdef.method("attr")
    def method_attr(self, space, args_w):
        # Legacy two-arg form: attr :name, true == attr_accessor :name.
        if len(args_w) == 2 and (args_w[1] is space.w_true or args_w[1] is space.w_false):
            [w_name, w_writable] = args_w
            if space.is_true(w_writable):
                self.method_attr_accessor(space, [w_name])
            else:
                self.method_attr_reader(space, [w_name])
        else:
            self.method_attr_reader(space, args_w)
    @classdef.method("module_function")
    def method_module_function(self, space, args_w):
        # Bare form flips the frame's default visibility; named form
        # copies the methods onto the singleton class and privatizes
        # the instance-level copies.
        if not args_w:
            self.set_default_visibility(space, W_FunctionObject.MODULE_FUNCTION)
            return self
        for w_arg in args_w:
            name = Coerce.symbol(space, w_arg)
            w_method = self.find_method(space, name)
            if w_method is None or isinstance(w_method, UndefMethod):
                cls_name = space.obj_to_s(self)
                raise space.error(space.w_NameError,
                    "undefined method `%s' for class `%s'" % (name, cls_name)
                )
            self.attach_method(space, name, w_method)
            self.set_method_visibility(space, name, W_FunctionObject.PRIVATE)
        return self
    @classdef.method("private_class_method")
    def method_private_class_method(self, space, args_w):
        w_cls = self.getsingletonclass(space)
        return space.send(w_cls, "private", args_w)
    @classdef.method("public_class_method")
    def method_public_class_method(self, space, args_w):
        w_cls = self.getsingletonclass(space)
        return space.send(w_cls, "public", args_w)
    @classdef.method("alias_method", new_name="symbol", old_name="symbol")
    @check_frozen()
    def method_alias_method(self, space, new_name, old_name):
        w_method = self.find_method(space, old_name)
        if w_method is None:
            w_method = space.w_object.find_method(space, old_name)
        if w_method is None or isinstance(w_method, UndefMethod):
            cls_name = space.obj_to_s(self)
            raise space.error(space.w_NameError,
                "undefined method `%s' for class `%s'" % (old_name, cls_name)
            )
        self.define_method(space, new_name, w_method)
        return self
    @classdef.method("ancestors")
    def method_ancestors(self, space):
        return space.newarray(self.ancestors(include_singleton=False))
    @classdef.method("included")
    def method_included(self, space, w_mod):
        # TODO: should be private
        pass
    @classdef.method("extended")
    def method_extended(self, space, w_mod):
        # TODO: should be private
        pass
    @classdef.method("extend_object")
    def method_extend_object(self, space, w_obj):
        if type(self) is not W_ModuleObject:
            raise space.error(space.w_TypeError, "wrong argument type")
        self.extend_object(space, space.getsingletonclass(w_obj))
    @classdef.method("name")
    def method_name(self, space):
        if self.name is None:
            return space.w_nil
        return space.newstr_fromstr(self.name)
    @classdef.method("private")
    def method_private(self, space, args_w):
        self.set_visibility(space, args_w, W_FunctionObject.PRIVATE)
        return self
    @classdef.method("public")
    def method_public(self, space, args_w):
        self.set_visibility(space, args_w, W_FunctionObject.PUBLIC)
        return self
    @classdef.method("protected")
    def method_protected(self, space, args_w):
        self.set_visibility(space, args_w, W_FunctionObject.PROTECTED)
        return self
    @classdef.method("private_constant")
    def method_private_constant(self, space, args_w):
        # Not implemented yet; accepted for compatibility.
        pass
    @classdef.method("constants")
    def method_constants(self, space, w_inherit=None):
        # Module.constants (no receiver-specific behavior) reports the
        # lexically visible plus inherited constants.
        if self is space.w_module and w_inherit is None:
            consts = {}
            for const in self.lexical_constants(space):
                consts[const] = None
            for const in self.inherited_constants(space):
                consts[const] = None
            return space.newarray([space.newsymbol(n) for n in consts])
        if w_inherit is None or space.is_true(w_inherit):
            return space.newarray([space.newsymbol(n) for n in self.included_constants(space)])
        else:
            return space.newarray([space.newsymbol(n) for n in self.constants_w])
    @classdef.method("const_missing", name="symbol")
    def method_const_missing(self, space, name):
        if self is space.w_object:
            raise space.error(space.w_NameError, "uninitialized constant %s" % (name))
        else:
            self_name = space.obj_to_s(self)
            raise space.error(space.w_NameError, "uninitialized constant %s::%s" % (self_name, name))
    @classdef.method("class_eval", string="str", filename="str")
    @classdef.method("module_eval", string="str", filename="str")
    def method_module_eval(self, space, string=None, filename=None, w_lineno=None, block=None):
        if string is not None and block is not None:
            raise space.error(space.w_ArgumentError, "wrong number of arguments")
        if string is not None:
            if filename is None:
                filename = "module_eval"
            if w_lineno is not None:
                lineno = space.int_w(w_lineno)
            else:
                lineno = 1
            return space.execute(string, self, lexical_scope=StaticScope(self, None), filepath=filename, initial_lineno=lineno)
        elif block is None:
            raise space.error(space.w_ArgumentError, "block not supplied")
        else:
            # Re-run the block with self and the lexical scope rebound
            # to this module.
            return space.invoke_block(block.copy(space, w_self=self, lexical_scope=StaticScope(self, block.lexical_scope)), [])
    @classdef.method("const_defined?", const="str", inherit="bool")
    def method_const_definedp(self, space, const, inherit=True):
        space._check_const_name(const)
        if inherit:
            return space.newbool(self.find_const(space, const, autoload=False) is not None)
        else:
            return space.newbool(self.find_local_const(space, const, autoload=False) is not None)
    @classdef.method("const_get", const="symbol", inherit="bool")
    def method_const_get(self, space, const, inherit=True):
        space._check_const_name(const)
        if inherit:
            w_res = self.find_const(space, const)
        else:
            w_res = self.find_local_const(space, const)
        if w_res is None:
            return space.send(self, "const_missing", [space.newsymbol(const)])
        return w_res
    @classdef.method("const_set", const="symbol")
    @check_frozen()
    def method_const_set(self, space, const, w_value):
        space.set_const(self, const, w_value)
        return w_value
    @classdef.method("remove_const", name="str")
    def method_remove_const(self, space, name):
        space._check_const_name(name)
        w_res = self.find_local_const(space, name, autoload=False)
        if w_res is None:
            self_name = space.obj_to_s(self)
            raise space.error(space.w_NameError,
                "uninitialized constant %s::%s" % (self_name, name)
            )
        del self.constants_w[name]
        self.mutated()
        return w_res
    @classdef.method("class_variable_defined?", name="symbol")
    def method_class_variable_definedp(self, space, name):
        return space.newbool(self.find_class_var(space, name) is not None)
    @classdef.method("class_variable_get", name="symbol")
    def method_class_variable_get(self, space, name):
        return space.find_class_var(self, name)
    @classdef.method("class_variable_set", name="symbol")
    @check_frozen()
    def method_class_variable_set(self, space, name, w_value):
        self.set_class_var(space, name, w_value)
        return w_value
    @classdef.method("remove_class_variable", name="symbol")
    def method_remove_class_variable(self, space, name):
        w_value = self.class_variables.get(space, name)
        if w_value is not None:
            self.class_variables.delete(name)
            return w_value
        # Defined only on an ancestor: removable from there, not here.
        if self.find_class_var(space, name) is not None:
            raise space.error(space.w_NameError,
                "cannot remove %s for %s" % (name, space.obj_to_s(self))
            )
        raise space.error(space.w_NameError,
            "class variable %s not defined for %s" % (name, space.obj_to_s(self))
        )
    @classdef.method("method_defined?", name="str")
    def method_method_definedp(self, space, name):
        return space.newbool(self.find_method(space, name) is not None)
    @classdef.method("===")
    def method_eqeqeq(self, space, w_obj):
        return space.newbool(self.is_ancestor_of(space.getclass(w_obj)))
    @classdef.method("<=")
    def method_lte(self, space, w_other):
        # true if self is w_other or a descendant; false if an ancestor;
        # nil when the two are unrelated.
        if not isinstance(w_other, W_ModuleObject):
            raise space.error(space.w_TypeError, "compared with non class/module")
        for w_mod in self.ancestors():
            if w_other is w_mod:
                return space.w_true
        for w_mod in w_other.ancestors():
            if self is w_mod:
                return space.w_false
        return space.w_nil
    @classdef.method("<")
    def method_lt(self, space, w_other):
        if self is w_other:
            return space.w_false
        return space.send(self, "<=", [w_other])
    @classdef.method(">=")
    def method_gte(self, space, w_other):
        if not isinstance(w_other, W_ModuleObject):
            raise space.error(space.w_TypeError, "compared with non class/module")
        return space.send(w_other, "<=", [self])
    @classdef.method(">")
    def method_gt(self, space, w_other):
        if not isinstance(w_other, W_ModuleObject):
            raise space.error(space.w_TypeError, "compared with non class/module")
        if self is w_other:
            return space.w_false
        return space.send(w_other, "<=", [self])
    @classdef.method("<=>")
    def method_comparison(self, space, w_other):
        if not isinstance(w_other, W_ModuleObject):
            return space.w_nil
        if self is w_other:
            return space.newint(0)
        other_is_subclass = space.send(self, "<", [w_other])
        if space.is_true(other_is_subclass):
            return space.newint(-1)
        elif other_is_subclass is space.w_nil:
            # Unrelated modules are not ordered.
            return space.w_nil
        else:
            return space.newint(1)
    @classdef.method("instance_method", name="symbol")
    def method_instance_method(self, space, name):
        return space.newmethod(name, self)
    @classdef.method("instance_methods", inherit="bool")
    def method_instance_methods(self, space, inherit=True):
        return space.newarray([
            space.newsymbol(sym)
            for sym in self.methods(space, inherit=inherit)
        ])
    @classdef.method("public_instance_methods", inherit="bool")
    def method_public_instance_methods(self, space, inherit=True):
        return space.newarray([
            space.newsymbol(sym)
            for sym in self.methods(space, visibility=W_FunctionObject.PUBLIC, inherit=inherit)
        ])
    @classdef.method("protected_instance_methods", inherit="bool")
    def method_protected_instance_methods(self, space, inherit=True):
        return space.newarray([
            space.newsymbol(sym)
            for sym in self.methods(space, visibility=W_FunctionObject.PROTECTED, inherit=inherit)
        ])
    @classdef.method("private_instance_methods", inherit="bool")
    def method_private_instance_methods(self, space, inherit=True):
        return space.newarray([
            space.newsymbol(sym)
            for sym in self.methods(space, visibility=W_FunctionObject.PRIVATE, inherit=inherit)
        ])
    @classdef.method("undef_method", name="symbol")
    def method_undef_method(self, space, name):
        w_method = self.find_method(space, name)
        if w_method is None or isinstance(w_method, UndefMethod):
            cls_name = space.obj_to_s(self)
            raise space.error(space.w_NameError,
                "undefined method `%s' for class `%s'" % (name, cls_name)
            )
        self.define_method(space, name, UndefMethod(name))
        return self
    @classdef.method("remove_method", name="symbol")
    @check_frozen()
    def method_remove_method(self, space, name):
        # Only removes from this module's own table (_find_method_pure),
        # never from ancestors — matching Ruby's remove_method.
        w_method = self._find_method_pure(space, name, self.version)
        if w_method is None or isinstance(w_method, UndefMethod):
            cls_name = space.obj_to_s(self)
            raise space.error(space.w_NameError,
                "method `%s' not defined in %s" % (name, cls_name)
            )
        del self.methods_w[name]
        self.mutated()
        self.method_removed(space, space.newsymbol(name))
        return self
    # NOTE(review): duplicate of the identical helper defined earlier in
    # this class; kept byte-for-byte to avoid changing the class's
    # Python-level attribute set.
    def method_removed(self, space, w_name):
        space.send(self, "method_removed", [w_name])
    @classdef.method("method_added")
    def method_method_added(self, space, w_name):
        return space.w_nil
    @classdef.method("method_undefined")
    def method_method_undefined(self, space, w_name):
        return space.w_nil
    @classdef.method("method_removed")
    def method_method_removed(self, space, w_name):
        return space.w_nil
    @classdef.method("autoload", name="symbol", path="path")
    def method_autoload(self, space, name, path):
        if len(path) == 0:
            raise space.error(space.w_ArgumentError, "empty file name")
        # Only register when the constant is not already defined.
        if not self.find_const(space, name):
            space.set_const(self, name, W_Autoload(space, path))
        return space.w_nil
    @classdef.method("autoload?", name="symbol")
    def method_autoloadp(self, space, name):
        # BUG FIX: this def was also named `method_autoload`, shadowing
        # the implementation of "autoload" above at the Python level.
        # Renamed with the `p` suffix used throughout this file for `?`
        # methods (cf. method_includep, method_const_definedp).
        w_autoload = self.constants_w.get(name, None)
        if isinstance(w_autoload, W_Autoload):
            return space.newstr_fromstr(w_autoload.path)
        else:
            return space.w_nil
    @classdef.method("class_exec")
    @classdef.method("module_exec")
    def method_module_exec(self, space, args_w, block):
        if block is None:
            raise space.error(space.w_LocalJumpError, "no block given")
        return space.invoke_block(
            block.copy(
                space,
                w_self=self,
                lexical_scope=StaticScope(self, None)
            ),
            args_w
        )
| {
"content_hash": "c61c34813680db0bc7981f6b1a53f92e",
"timestamp": "",
"source": "github",
"line_count": 862,
"max_line_length": 127,
"avg_line_length": 37.72389791183295,
"alnum_prop": 0.6053877852266437,
"repo_name": "topazproject/topaz",
"id": "6c3b95d3d52a59d3102995c952322bb3f5cdb3ca",
"size": "32518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topaz/objects/moduleobject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "43276"
},
{
"name": "Makefile",
"bytes": "6366"
},
{
"name": "Python",
"bytes": "1339171"
},
{
"name": "Ruby",
"bytes": "439997"
},
{
"name": "Shell",
"bytes": "2681"
}
],
"symlink_target": ""
} |
import os
import time
import signal
import requests
from headers import headers
from db.redis_db import Urls
from db.redis_db import Cookies
from logger.log import crawler, other
from db.login_info import freeze_account
from utils.email_warning import send_email
from page_parse.basic import is_403, is_404, is_complete
from decorators.decorator import timeout_decorator, timeout
from config.conf import get_timeout, get_crawl_interal, get_excp_interal, get_max_retries
# Crawl tuning knobs, read once from the config at import time.
time_out = get_timeout()  # passed as timeout= to every requests.get call
interal = get_crawl_interal()  # sleep between successful crawls ([sic] "interval")
max_retries = get_max_retries()  # attempts per url before giving up
excp_interal = get_excp_interal()  # back-off sleep after a request exception
def is_banned(url):
    """Return True if the response url indicates a blocked/frozen account.

    Weibo redirects banned sessions to urls containing one of these
    markers; the check is a plain substring test on the final url.
    """
    markers = ('unfreeze', 'accessdeny', 'userblock')
    return any(marker in url for marker in markers)
@timeout(200)
@timeout_decorator
def get_page(url, user_verify=True, need_login=True, proxys=None):
    """
    :param url: url to be crawled
    :param user_verify: if it's ajax url, the value is False, else True
    :param need_login: if the url is need to login, the value is True, else False
    :param proxys: optional requests-style proxies mapping (default: none)
    :return: return '' if exception happens or status_code != 200
    """
    # Bug fix: proxys previously defaulted to a shared mutable {}.
    if proxys is None:
        proxys = {}
    crawler.info('the crawling url is {url}'.format(url=url))
    count = 0
    while count < max_retries:
        # Bug fix: name_cookies was unbound when need_login is False but
        # the banned/locked branches below referenced it (NameError is
        # not in the except tuple, so it would crash the worker).
        name_cookies = None
        if need_login:
            name_cookies = Cookies.fetch_cookies()
            if name_cookies is None:
                crawler.warning('no cookies in cookies pool, please find out the reason')
                send_email()
                # No usable account left: terminate the parent process.
                os.kill(os.getppid(), signal.SIGTERM)
        try:
            if need_login:
                resp = requests.get(url, headers=headers, cookies=name_cookies[1], timeout=time_out, verify=False)
                if "$CONFIG['islogin'] = '0'" in resp.text:
                    crawler.warning('account {} has been banned'.format(name_cookies[0]))
                    freeze_account(name_cookies[0], 0)
                    Cookies.delete_cookies(name_cookies[0])
                    # Bug fix: retry with another account but still bound
                    # the total attempts (was an unbounded `continue`).
                    count += 1
                    continue
            else:
                resp = requests.get(url, headers=headers, timeout=time_out, proxies=proxys)
            page = resp.text
            if page:
                page = page.encode('utf-8', 'ignore').decode('utf-8')
            else:
                # Bug fix: empty body used to retry without incrementing
                # count, allowing an infinite loop.
                count += 1
                continue
            # slow down to avoid being banned
            time.sleep(interal)
            if user_verify:
                if is_banned(resp.url) or is_403(page):
                    if name_cookies is not None:
                        crawler.warning('account {} has been banned'.format(name_cookies[0]))
                        freeze_account(name_cookies[0], 0)
                        Cookies.delete_cookies(name_cookies[0])
                    count += 1
                    continue
                if 'verifybmobile' in resp.url:
                    if name_cookies is not None:
                        crawler.warning('account {} has been locked,you should use your phone to unlock it'.
                                        format(name_cookies[0]))
                        freeze_account(name_cookies[0], -1)
                        Cookies.delete_cookies(name_cookies[0])
                    # Bug fix: bound the retries here as well.
                    count += 1
                    continue
                if not is_complete(page):
                    count += 1
                    continue
                if is_404(page):
                    crawler.warning('{url} seems to be 404'.format(url=url))
                    return ''
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError, AttributeError) as e:
            crawler.warning('excepitons happens when crawling {},specific infos are {}'.format(url, e))
            count += 1
            time.sleep(excp_interal)
        else:
            # Urls.store_crawl_url(url, 1)
            return page
    crawler.warning('max tries for {},check the url in redis db2'.format(url))
    # Urls.store_crawl_url(url, 0)
    return ''
__all__ = ['get_page'] | {
"content_hash": "e3799eda410e88ec169823c5e2265a07",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 114,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.5742031872509961,
"repo_name": "KingOfBanana/SocialNetworkAI",
"id": "0991173f8bd2a8cdafc8e0207c7ca2641b24861a",
"size": "4037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "page_get/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2240072"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
import os
from loguru import logger
from requests import Session
from requests.exceptions import RequestException
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
logger = logger.bind(name='qbittorrent')
class OutputQBitTorrent:
"""
Example:
qbittorrent:
username: <USERNAME> (default: (none))
password: <PASSWORD> (default: (none))
host: <HOSTNAME> (default: localhost)
port: <PORT> (default: 8080)
use_ssl: <SSL> (default: False)
verify_cert: <VERIFY> (default: True)
path: <OUTPUT_DIR> (default: (none))
label: <LABEL> (default: (none))
maxupspeed: <torrent upload speed limit> (default: 0)
maxdownspeed: <torrent download speed limit> (default: 0)
add_paused: <ADD_PAUSED> (default: False)
"""
schema = {
'anyOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'host': {'type': 'string'},
'port': {'type': 'integer'},
'use_ssl': {'type': 'boolean'},
'verify_cert': {'type': 'boolean'},
'path': {'type': 'string'},
'label': {'type': 'string'},
'maxupspeed': {'type': 'integer'},
'maxdownspeed': {'type': 'integer'},
'fail_html': {'type': 'boolean'},
'add_paused': {'type': 'boolean'},
},
'additionalProperties': False,
},
]
}
def __init__(self):
super().__init__()
self.session = Session()
self.api_url_login = None
self.api_url_upload = None
self.api_url_download = None
self.url = None
self.connected = False
def _request(self, method, url, msg_on_fail=None, **kwargs):
try:
response = self.session.request(method, url, **kwargs)
if response.text == "Ok.":
return response
msg = msg_on_fail if msg_on_fail else f'Failure. URL: {url}, data: {kwargs}'
except RequestException as e:
msg = str(e)
raise plugin.PluginError(f'Error when trying to send request to qBittorrent: {msg}')
def check_api_version(self, msg_on_fail, verify=True):
try:
url = self.url + "/api/v2/app/webapiVersion"
response = self.session.request('get', url, verify=verify)
if response.status_code != 404:
self.api_url_login = '/api/v2/auth/login'
self.api_url_upload = '/api/v2/torrents/add'
self.api_url_download = '/api/v2/torrents/add'
return response
url = self.url + "/version/api"
response = self.session.request('get', url, verify=verify)
if response.status_code != 404:
self.api_url_login = '/login'
self.api_url_upload = '/command/upload'
self.api_url_download = '/command/download'
return response
msg = 'Failure. URL: {}'.format(url) if not msg_on_fail else msg_on_fail
except RequestException as e:
msg = str(e)
raise plugin.PluginError(
'Error when trying to send request to qBittorrent: {}'.format(msg)
)
def connect(self, config):
"""
Connect to qBittorrent Web UI. Username and password not necessary
if 'Bypass authentication for localhost' is checked and host is
'localhost'.
"""
self.url = '{}://{}:{}'.format(
'https' if config['use_ssl'] else 'http', config['host'], config['port']
)
self.check_api_version('Check API version failed.', verify=config['verify_cert'])
if config.get('username') and config.get('password'):
data = {'username': config['username'], 'password': config['password']}
self._request(
'post',
self.url + self.api_url_login,
data=data,
msg_on_fail='Authentication failed.',
verify=config['verify_cert'],
)
logger.debug('Successfully connected to qBittorrent')
self.connected = True
def add_torrent_file(self, file_path, data, verify_cert):
if not self.connected:
raise plugin.PluginError('Not connected.')
multipart_data = {k: (None, v) for k, v in data.items()}
with open(file_path, 'rb') as f:
multipart_data['torrents'] = f
self._request(
'post',
self.url + self.api_url_upload,
msg_on_fail='Failed to add file to qBittorrent',
files=multipart_data,
verify=verify_cert,
)
logger.debug('Added torrent file {} to qBittorrent', file_path)
def add_torrent_url(self, url, data, verify_cert):
if not self.connected:
raise plugin.PluginError('Not connected.')
data['urls'] = url
multipart_data = {k: (None, v) for k, v in data.items()}
self._request(
'post',
self.url + self.api_url_download,
msg_on_fail='Failed to add file to qBittorrent',
files=multipart_data,
verify=verify_cert,
)
logger.debug('Added url {} to qBittorrent', url)
@staticmethod
def prepare_config(config):
    """Normalize the plugin config: expand bool shorthand, fill defaults.

    The dict is completed in place and also returned.
    """
    if isinstance(config, bool):
        config = {'enabled': config}
    defaults = {
        'enabled': True,
        'host': 'localhost',
        'port': 8080,
        'use_ssl': False,
        'verify_cert': True,
        'label': '',
        'maxupspeed': 0,
        'maxdownspeed': 0,
        'fail_html': True,
    }
    for key, value in defaults.items():
        config.setdefault(key, value)
    return config
def add_entries(self, task, config):
    """Submit every accepted entry to qBittorrent (file upload or URL add)."""
    for entry in task.accepted:
        form_data = {}
        try:
            # Entry-level 'path' overrides the plugin-level default.
            save_path = entry.render(entry.get('path', config.get('path', '')))
            if save_path:
                form_data['savepath'] = save_path
        except RenderError as e:
            logger.error('Error setting path for {}: {}', entry['title'], e)
        # NOTE(review): unlike 'path' above, this render call is outside a
        # try block, so a RenderError on 'label' would propagate — confirm
        # whether that is intentional.
        label = entry.render(entry.get('label', config.get('label', '')))
        if label:
            form_data['label'] = label  # qBittorrent v3.3.3-
            form_data['category'] = label  # qBittorrent v3.3.4+
        add_paused = entry.get('add_paused', config.get('add_paused'))
        if add_paused:
            form_data['paused'] = 'true'
        # Speed limits: config value * 1024 — presumably the config is in
        # KiB/s and the API expects bytes/s; TODO confirm against the API.
        maxupspeed = entry.get('maxupspeed', config.get('maxupspeed'))
        if maxupspeed:
            form_data['upLimit'] = maxupspeed * 1024
        maxdownspeed = entry.get('maxdownspeed', config.get('maxdownspeed'))
        if maxdownspeed:
            form_data['dlLimit'] = maxdownspeed * 1024
        is_magnet = entry['url'].startswith('magnet:')
        # In test mode just log what would have been submitted and skip.
        if task.manager.options.test:
            logger.info('Test mode.')
            logger.info('Would add torrent to qBittorrent with:')
            if not is_magnet:
                logger.info('File: {}', entry.get('file'))
            else:
                logger.info('Url: {}', entry.get('url'))
            logger.info('Save path: {}', form_data.get('savepath'))
            logger.info('Label: {}', form_data.get('label'))
            logger.info('Paused: {}', form_data.get('paused', 'false'))
            if maxupspeed:
                logger.info('Upload Speed Limit: {}', form_data.get('upLimit'))
            if maxdownspeed:
                logger.info('Download Speed Limit: {}', form_data.get('dlLimit'))
            continue
        if not is_magnet:
            # Non-magnet entries must have been downloaded to a temp file
            # by the download plugin (see on_task_download).
            if 'file' not in entry:
                entry.fail('File missing?')
                continue
            if not os.path.exists(entry['file']):
                tmp_path = os.path.join(task.manager.config_base, 'temp')
                logger.debug('entry: {}', entry)
                logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                continue
            self.add_torrent_file(entry['file'], form_data, config['verify_cert'])
        else:
            self.add_torrent_url(entry['url'], form_data, config['verify_cert'])
@plugin.priority(120)
def on_task_download(self, task, config):
    """
    Run the download plugin so torrent files exist on disk before the
    output phase pushes them to qBittorrent.
    """
    config = self.prepare_config(config)
    # Only fetch temp files when the plugin is enabled and the user has not
    # configured the download plugin explicitly in the task.
    if config['enabled'] and 'download' not in task.config:
        plugin.get('download', self).get_temp_files(
            task, handle_magnets=True, fail_html=config['fail_html']
        )
@plugin.priority(135)
def on_task_output(self, task, config):
    """Push all accepted entries to qBittorrent at the end of the task."""
    if not task.accepted:
        return
    config = self.prepare_config(config)
    self.connect(config)
    self.add_entries(task, config)
@event('plugin.register')
def register_plugin():
    # Register this output plugin with FlexGet under the name 'qbittorrent'.
    plugin.register(OutputQBitTorrent, 'qbittorrent', api_ver=2)
| {
"content_hash": "3879ed8c1fa844f653547bf5b1e8b67d",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 93,
"avg_line_length": 39.00803212851405,
"alnum_prop": 0.5284670029856893,
"repo_name": "ianstalk/Flexget",
"id": "441c5e696a1fefa0f19fbf1e71c75f735f5625b0",
"size": "9713",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/clients/qbittorrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2063551"
}
],
"symlink_target": ""
} |
import sys
import json
import csv

if __name__ == "__main__":
    # Delete a fixed vMedia policy ("RHEL-7-3") from a Cisco UCS Manager.
    # Usage: script.py <JSON settings file with ip/user/pw keys>.
    try:
        if len(sys.argv) < 2:
            # Modernized from Python-2 print statements to the print()
            # function (Python 2 reached end of life).
            print("Usage: %s <JSON settings file>" % sys.argv[0])
            print("      <settings file>: access settings (IP/user/password)")
            sys.exit(0)
        # 'with' guarantees the settings file is closed; the original
        # opened it and never closed the handle.
        with open(sys.argv[1], 'r') as f:
            settings_file = json.load(f)
        from ucsmsdk.ucshandle import UcsHandle
        handle = UcsHandle(ip=settings_file['ip'], username=settings_file['user'],
                           password=settings_file['pw'])
        handle.login()
        print("Deleting vMedia Policy:")
        mo = handle.query_dn("org-root/mnt-cfg-policy-RHEL-7-3")
        handle.remove_mo(mo)
        handle.commit()
        handle.logout()
    except Exception as err:
        # Best-effort diagnostics: report the error and full traceback.
        print("Exception:", str(err))
        import traceback
        print('-' * 60)
        traceback.print_exc(file=sys.stdout)
        print('-' * 60)
| {
"content_hash": "6163fa35195a27c93142a5e988015cd9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 112,
"avg_line_length": 27,
"alnum_prop": 0.5588235294117647,
"repo_name": "jocook/s3260-python",
"id": "93c14b61a575d24ae364babf2b7eb784eb4f5c16",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ucsm_destroy_vMediaPolicy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "323126"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from server.io_multiplex import IOMultiplex
from server.request import WSGIRequest
from server.err_code import ERR_NULL_REQUEST, ERR_100_CONTINUE_REQUEST
import errno
import socket
from server.log import logging
try:
import cStringIO as StringIO
except (Exception, ):
import StringIO
class WSGIServer(object):
    """Non-blocking WSGI server driven by an IOMultiplex event loop.

    The listening socket and every accepted connection are registered with
    the multiplexer; requests are parsed by ``WSGIRequest`` handlers and
    responses are written back when the socket becomes writable.  HTTP
    "Expect: 100-continue" requests are parked in ``continue_request_list``
    until their body arrives.
    """

    def __init__(self, host=None, port=None, keep_alive=True):
        self.host = host
        self.port = port
        self.keep_alive = keep_alive
        self.server_name = ""
        self.handler = WSGIRequest
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.base_environ = {}
        # Bind immediately only when both host and port were supplied;
        # otherwise the caller is expected to use bind() later.
        if host is not None and port is not None:
            self.__socket.bind((host, port))
            self.server_name = socket.getfqdn(host)
            self.setup_environ()
        self.__socket.listen(5)
        self.running = False
        self.application = None
        self.multiplex = IOMultiplex.initialized()
        # fd -> (conn, addr) for every accepted connection.
        self.connection_list = {}
        # fd -> response waiting to be written.
        self.response_list = {}
        # 100 continue request:
        # requests that so far have only a start line and headers (no body).
        self.continue_request_list = {}
        # 100 continue response:
        # responses used to send "100 Continue" back to the client.
        self.continue_response_list = {}

    def start(self):
        """Register the listening socket with the multiplexer."""
        if self.application is None:
            raise Exception("application is None!")
        self.multiplex.add_handler(fd=self.__socket.fileno(),
                                   handler=self.handle_connection,
                                   eventmask=IOMultiplex.READ)

    def handle_connection(self, fd, event):
        """Accept a new client connection and register it for reading."""
        try:
            conn, addr = self.__socket.accept()
            conn.setblocking(0)
            self.connection_list[conn.fileno()] = (conn, addr)
            self.multiplex.add_handler(fd=conn.fileno(),
                                       handler=self.handle_read_request,
                                       eventmask=IOMultiplex.READ)
        except socket.error as ex:
            print(ex)
            # BUGFIX: socket.error is not subscriptable on Python 3;
            # use .errno (available since Python 2.6) instead of ex[0].
            if ex.errno in (errno.EWOULDBLOCK,):
                pass
            else:
                raise

    def handle_read_request(self, fd, event):
        """ Temporarily function """
        conn, addr = self.connection_list[fd]
        rfile = conn.makefile("rb")
        wfile = conn.makefile("wb")
        if fd in self.continue_request_list:
            # Second phase of a 100-continue exchange: reuse the stored
            # handler, which already holds the parsed start line/headers.
            request_handler = self.continue_request_list[fd]
            request_handler.rfile = rfile
            request_handler.wfile = wfile
        else:
            request_handler = self.handler(self, rfile, wfile, addr)
        err, msg, response = request_handler.handle_one_request()
        self.multiplex.remove_handler(fd)
        if err == ERR_NULL_REQUEST:
            # Got a blank request; nothing to respond to.
            logging.error("Get blank request from fd[%d]", fd)
            return
        if err == ERR_100_CONTINUE_REQUEST:
            # Return a 100-continue response and keep listening for the body.
            # BUGFIX: the original stored the name `request_handler`, which
            # was unbound when the request came from continue_request_list;
            # both branches now bind the same name.
            logging.info("Get 100 continue request from fd[%d]", fd)
            self.continue_request_list[fd] = request_handler
            self.multiplex.add_handler(fd=conn.fileno(),
                                       handler=self.handle_read_request,
                                       eventmask=IOMultiplex.READ)
        self.response_list[fd] = response
        self.multiplex.add_handler(fd=fd, handler=self.handle_write_response,
                                   eventmask=IOMultiplex.WRITE)

    def handle_write_response(self, fd, event):
        """Write the pending response; keep or close the connection."""
        conn, addr = self.connection_list[fd]
        wfile = conn.makefile("wb")
        response = self.response_list[fd]
        if response is not None:
            response.set_wfile(wfile)
            response.handle_response()
        del self.response_list[fd]
        self.multiplex.remove_handler(fd)
        if self.keep_alive:
            # Keep the connection open for the next request.
            self.multiplex.add_handler(fd=conn.fileno(),
                                       handler=self.handle_read_request,
                                       eventmask=IOMultiplex.READ)
        else:
            del self.connection_list[fd]
            conn.close()

    def bind(self, host, port):
        """ Bind host and port to server socket """
        if self.running is False:
            self.__socket.bind((host, port))
            self.server_name = socket.getfqdn(host)
            self.setup_environ()

    def close(self):
        """ Close server socket """
        self.running = False
        self.__socket.close()

    def set_app(self, application):
        """ Set server app """
        self.application = application

    def setup_environ(self):
        # Set up the base WSGI/CGI environment shared by all requests.
        env = self.base_environ = {}
        env['SERVER_NAME'] = self.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PORT'] = str(self.port)
        env['REMOTE_HOST'] = ''
        env['CONTENT_LENGTH'] = ''
        env['SCRIPT_NAME'] = ''
        env['HTTPS'] = 'off'

    def set_blocking(self, flag):
        # Only meaningful before the server starts running.
        if self.running is False:
            self.__socket.setblocking(flag)
| {
"content_hash": "c4656b93a871d6d9aa8d5e951854a390",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 124,
"avg_line_length": 33.61497326203209,
"alnum_prop": 0.573814826598791,
"repo_name": "JianMingZhuo/WSGIServer",
"id": "6f33289d345209bf2fd6e435661283dfbeca2fcc",
"size": "6912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43226"
}
],
"symlink_target": ""
} |
'''
app.emails
~~~~~~~~~~~~~~~~
Defining all the email functions here.
'''
from flask_mail import Message
from flask import render_template
from config import ADMINS
from app import mail, app
from .decorators import async_dec
@async_dec
def send_async_email(app, msg):
    '''Send mail asynchronously.'''
    # async_dec runs this in the background; the explicit app context is
    # required because Flask-Mail reads its config from the current app.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    '''Build a Message with plain-text and HTML bodies, then send it asynchronously.'''
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    send_async_email(app, message)
def follower_notification(followed, follower):
    '''Email *followed* to announce that *follower* started following them.'''
    subject = '[microblog] %s is now following you!' % follower.nickname
    text_body = render_template('follower_email.txt',
                                user=followed, follower=follower)
    html_body = render_template('follower_email.html',
                                user=followed, follower=follower)
    send_email(subject, ADMINS[0], [followed.email], text_body, html_body)
"content_hash": "52bd36b8d591905638aa16a081746fbd",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 80,
"avg_line_length": 32.97222222222222,
"alnum_prop": 0.5846672283066554,
"repo_name": "Napchat/microblog",
"id": "055b562b56a72522b654658e34963eb8833c9b13",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/emails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10470"
},
{
"name": "JavaScript",
"bytes": "128944"
},
{
"name": "Python",
"bytes": "35229"
}
],
"symlink_target": ""
} |
import os
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import Menu
from Domain.Discipline import Discipline
from View.GUI.InputDialog import InputDialog
class DisciplineView(tk.Frame):
    """Treeview UI of disciplines -> enrolled students -> grades.

    Offers sorting (alphabetical or by average), a right-click context menu
    for rename/delete, and dialogs for adding disciplines.
    """

    def __init__(self, master, catalogController, commandController):
        # Set up our variables and controllers
        self.master = master
        self._catalogController = catalogController
        self._commandController = commandController
        self.generateTree()

    def generateTree(self, sort=0):
        # Rebuild the whole widget tree. sort=0 -> alphabetical by name,
        # anything else -> descending by discipline average.
        # Call the tk.Frame constructor
        tk.Frame.__init__(self, self.master)
        # Create the treeview
        self.tree = ttk.Treeview(self)
        ysb = ttk.Scrollbar(self, orient='vertical', command=self.tree.yview)  # Add Scrollbars
        xsb = ttk.Scrollbar(self, orient='horizontal', command=self.tree.xview)  # Add Scrollbars
        self.tree.configure(yscroll=ysb.set, xscroll=xsb.set)  # Bind scrollbars to treeview
        self.tree.heading('#0', text='Name', anchor='center')  # Set up the headings
        # NOTE(review): ("one") is just the string "one", not a 1-tuple —
        # tkinter happens to accept it, but ("one",) would be explicit.
        self.tree["columns"] = ("one")  # Set up the columns
        self.tree.column("one", width=100)  # Set up the size of column 'one'
        self.tree.heading("one", text="Grade")  # Set up the header of column 'one'
        # Process data here
        disciplines = self._catalogController.getJoinByDisciplines()
        if sort == 0:
            disciplines = sorted(disciplines, key=lambda x: x['discipline'].getName())
        else:
            disciplines = sorted(disciplines, key=lambda x: self._catalogController.getAverageByDisciplineId(
                x['discipline'].getId()), reverse=True)
        for disciplineNode in disciplines:
            discipline = disciplineNode['discipline']
            # Tags carry ('disciplineRoot', id) so the context-menu handler
            # can recover the discipline id from the clicked row.
            root_node = self.tree.insert('', 'end', text=discipline.getName(),
                                         values=(self._catalogController.getAverageByDisciplineId(discipline.getId())),
                                         tags=('disciplineRoot', discipline.getId()), open=False)
            self.process_discipline_students(root_node, disciplineNode['students'], discipline)
        # Add the tree to the grid
        self.tree.grid(row=0, column=0, sticky="nesw")
        ysb.grid(row=0, column=1, sticky='ns')  # Configure the sticky zones
        xsb.grid(row=1, column=0, sticky='ew')  # Configure the sticky zones
        # Bind events
        self.tree.bind("<ButtonRelease-3>", self.popup)
        # Pack the grid
        self.grid(sticky="nesw")
        self.master.columnconfigure(0, weight=1)
        self.master.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        tk.Button(self, text='Add New Discipline', width=50, height=1,
                  command=lambda: self._add_new_discipline_handler()).grid(column=0, sticky="nesw")
        tk.Label(self, text='List Options', width=50, height=1).grid(column=0, sticky="nesw")
        tk.Button(self, text='Sort Alphabetically', width=50, height=1,
                  command=lambda: self.generateTree(0)).grid(column=0, sticky="nesw")
        tk.Button(self, text='Sort By Average', width=50, height=1, command=lambda: self.generateTree(1)
                  ).grid(column=0, sticky="nesw")
        # self.pack(fill=tk.BOTH, expand=1)

    def selectItem(self, a):
        # Debug helper: print the currently focused tree item.
        curItem = self.tree.focus()
        print(self.tree.item(curItem))

    def popup(self, event):
        """
        action in event of button 3 on tree view
        """
        # popup.add_separator()
        # popup.add_command(label="Home")
        # select row under mouse
        iid = self.tree.identify_row(event.y)
        if iid:
            # mouse pointer over item
            self.tree.selection_set(iid)
            selection = self.tree.item(iid)
            # If it's a discipline, show the context menu for that discipline
            # (tags[1] holds the discipline id set in generateTree).
            if 'disciplineRoot' in selection['tags']:
                popup = Menu(self.master, tearoff=0)
                popup.add_command(label="Change Discipline Name", command=lambda: self._change_discipline_name_handler(selection['tags'][1]))
                popup.add_command(label="Delete Discipline", command=lambda: self._handle_remove_discipline(selection['tags'][1]))
                popup.tk_popup(event.x_root, event.y_root, 0)
        else:
            # mouse pointer not over item
            # occurs when items do not fill frame
            # no action required
            popup = Menu(self.master, tearoff=0)
            popup.add_command(label="Add New Discipline", command=self._add_new_discipline_handler)
            popup.tk_popup(event.x_root, event.y_root, 0)

    def _change_discipline_name_handler(self, id):
        # Ask for a new name; silently do nothing if the dialog was closed.
        self.w = InputDialog(self.master, header="Input the new name")
        self.master.wait_window(self.w.top)
        if hasattr(self.w, 'value'):  # Users are bad and sometimes close without talking with me ;-;
            self._catalogController.updateDiscipline(id, self.w.value)
            self.generateTree()  # UPDATE TREE

    def _add_new_discipline_handler(self):
        # Create popup to ask for the discipline id
        self.idPopup = InputDialog(self.master, header="Input the discipline id")
        self.master.wait_window(self.idPopup.top)
        if hasattr(self.idPopup, 'value'):  # Users are bad and sometimes close without talking with me ;-;
            # Create popup to ask for the discipline name
            self.namePopup = InputDialog(self.master, header="Input the discipline name")
            self.master.wait_window(self.namePopup.top)
            if hasattr(self.namePopup, 'value'):  # Users are bad and sometimes close without talking with me ;-;
                try:
                    self._catalogController.addDiscipline(Discipline(int(self.idPopup.value), self.namePopup.value))
                    self.generateTree()  # UPDATE TREE
                except ValueError:
                    # NOTE(review): a non-numeric id is only reported on
                    # stdout; consider surfacing this in the UI.
                    print("wow no")

    def _handle_remove_discipline(self, id):
        # Require the user to type CONFIRM before deleting.
        self.w = InputDialog(self.master, header="To remove the discipline, please type CONFIRM")
        self.master.wait_window(self.w.top)
        if hasattr(self.w, 'value'):  # Users are bad and sometimes close without talking with me ;-;
            if self.w.value == "CONFIRM":
                self._catalogController.removeDisciplineById(int(id))
                self.generateTree()  # UPDATE TREE

    def process_discipline_students(self, parent, students, discipline):
        """
        Used to process the Students that are enrolled at a Discipline
        """
        """
        Discipline:
            * Student
            * Student
        """
        for student in students:
            student_node = self.tree.insert(parent, 'end', text=student.getName(), values=(
                self._catalogController.getAverageAtDisciplineByStudentID(student.getId(), discipline.getId())),
                tags=('studentNode', discipline.getId(), student.getId()), open=False)
            self.process_disciplines_for_student(student_node, student, discipline)

    def process_disciplines_for_student(self, parent, student, discipline):
        """
        Used to process the Grades of a Student that is enrolled at a Discipline
        """
        """
        Discipline:
            Student
                * Grade
                * Grade
            Student
        """
        for grade in self._catalogController.getGradesByStudentIdAndDisciplineId(student.getId(), discipline.getId()):
            self.tree.insert(parent, 'end', text=discipline.getName(), values=(grade.getGrade()),
                             tags=('gradeNode', student.getId(), discipline.getId()), open=False)
"content_hash": "e82962dec3627d563d63926db2bdcf75",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 141,
"avg_line_length": 48.50931677018634,
"alnum_prop": 0.612932138284251,
"repo_name": "Zephyrrus/ubb",
"id": "a09e37aeab3d889076b4ad17ef3abf59bb803dd2",
"size": "7810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "YEAR 1/SEM1/FP/LAB/l6-l9/View/GUI/DisciplineView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "96"
},
{
"name": "Assembly",
"bytes": "24190"
},
{
"name": "Batchfile",
"bytes": "80"
},
{
"name": "C",
"bytes": "504974"
},
{
"name": "C#",
"bytes": "116117"
},
{
"name": "C++",
"bytes": "406145"
},
{
"name": "CMake",
"bytes": "116836"
},
{
"name": "CSS",
"bytes": "507511"
},
{
"name": "Common Lisp",
"bytes": "4926"
},
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "HTML",
"bytes": "774629"
},
{
"name": "Hack",
"bytes": "1348"
},
{
"name": "Java",
"bytes": "225193"
},
{
"name": "JavaScript",
"bytes": "1323357"
},
{
"name": "Kotlin",
"bytes": "80576"
},
{
"name": "M",
"bytes": "812"
},
{
"name": "MATLAB",
"bytes": "14300"
},
{
"name": "Makefile",
"bytes": "62922"
},
{
"name": "PHP",
"bytes": "26576"
},
{
"name": "PLSQL",
"bytes": "3270"
},
{
"name": "PLpgSQL",
"bytes": "73862"
},
{
"name": "Perl 6",
"bytes": "324"
},
{
"name": "Prolog",
"bytes": "5214"
},
{
"name": "Python",
"bytes": "315759"
},
{
"name": "QMake",
"bytes": "5282"
},
{
"name": "Shell",
"bytes": "4089"
},
{
"name": "TSQL",
"bytes": "79222"
},
{
"name": "XSLT",
"bytes": "1953"
},
{
"name": "Yacc",
"bytes": "1718"
}
],
"symlink_target": ""
} |
import logging
import unittest
import config
import thread_cert
# Test description: Here is the test case `5.11.1 DUA-TC-04: DUA re-registration`
#
# Topology:
# -----------(eth)----------------
# | |
# Router_1----BR_1---HOST
# \ /
# Router_2
#
from pktverify.packet_verifier import PacketVerifier
# Node identifiers used as keys into the TOPOLOGY dict below.
BR_1 = 1
ROUTER1 = 2
ROUTER2 = 3
HOST = 4
class BBR_5_11_01(thread_cert.TestCase):
    """Thread cert scenario 5.11.1 DUA-TC-04: DUA re-registration.

    Brings up BR_1 as primary backbone router, lets Router_2 obtain a
    Domain Unicast Address (DUA), then verifies backbone reachability and
    the DUA registration packet flow.
    """
    USE_MESSAGE_FACTORY = False

    TOPOLOGY = {
        BR_1: {
            'name': 'BR_1',
            'allowlist': [ROUTER1, ROUTER2],
            'is_otbr': True,
            'version': '1.2',
        },
        ROUTER1: {
            'name': 'Router_1',
            'allowlist': [ROUTER2, BR_1],
            'version': '1.2',
        },
        ROUTER2: {
            'name': 'Router_2',
            'allowlist': [ROUTER1, BR_1],
            'version': '1.2',
        },
        HOST: {
            'name': 'Host',
            'is_host': True
        },
    }

    def test(self):
        # Bring up the backbone host, form the Thread network, then check
        # DUA assignment and bidirectional backbone connectivity.
        self.nodes[HOST].start()
        # P1: Router_1 is configured with leader weight of 72 in case the test is executed on a CCM network
        self.nodes[ROUTER1].set_weight(72)
        self.nodes[ROUTER1].start()
        self.simulator.go(config.LEADER_STARTUP_DELAY)
        self.assertEqual('leader', self.nodes[ROUTER1].get_state())
        self.nodes[ROUTER2].start()
        self.simulator.go(5)
        self.assertEqual('router', self.nodes[ROUTER2].get_state())
        # The OTBR docker enables SRP Server by default, lets explicitly
        # disable SRP server to avoid Network Data population.
        # TODO: Enhance the test script to tolerate additional Service TLV
        # in Network Data.
        self.nodes[BR_1].srp_server_set_enabled(False)
        self.nodes[BR_1].start()
        self.simulator.go(5)
        self.assertEqual('router', self.nodes[BR_1].get_state())
        self.nodes[BR_1].enable_backbone_router()
        self.simulator.go(3)
        self.assertTrue(self.nodes[BR_1].is_primary_backbone_router)
        self.nodes[BR_1].add_prefix(config.DOMAIN_PREFIX, "parosD")
        self.nodes[BR_1].register_netdata()
        self.simulator.go(5)
        # Router_2 must have derived a DUA from the domain prefix.
        self.assertIsNotNone(self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.DUA))
        self.simulator.go(10)  # must wait for DUA_DAD_REPEATS to complete
        logging.info("Host addresses: %r", self.nodes[HOST].get_addrs())
        self.assertGreaterEqual(len(self.nodes[HOST].get_addrs()), 2)
        self.collect_ipaddrs()
        self.collect_rloc16s()
        Dg = self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.DUA)
        self.collect_extra_vars(Dg=Dg)
        logging.info("BR_1 addrs: %r", self.nodes[BR_1].get_addrs())
        logging.info("Host addrs: %r", self.nodes[HOST].get_addrs())
        # BR_1 and Host can ping each other on the Backbone link
        self.assertTrue(self.nodes[HOST].ping(self.nodes[BR_1].get_ip6_address(config.ADDRESS_TYPE.BACKBONE_GUA),
                                              backbone=True))
        self.assertTrue(self.nodes[BR_1].ping(self.nodes[HOST].get_ip6_address(config.ADDRESS_TYPE.BACKBONE_GUA),
                                              backbone=True))
        # Step 23: Host sends ping packet to destination Dg (successful if DUA features work)
        self.assertTrue(self.nodes[HOST].ping(Dg, backbone=True))

    def verify(self, pv: PacketVerifier):
        # Validate the captured packet flow against the expected test steps.
        pkts = pv.pkts
        pv.add_common_vars()
        pv.summary.show()
        pv.verify_attached('BR_1')
        MM = pv.vars['MM_PORT']
        BB = pv.vars['BB_PORT']
        BR_1 = pv.vars['BR_1']
        BR_1_ETH = pv.vars['BR_1_ETH']
        Host_ETH = pv.vars['Host_ETH']
        BR_1_BGUA = pv.vars['BR_1_BGUA']
        Host_BGUA = pv.vars['Host_BGUA']
        Dg = pv.vars['Dg']  # DUA of Router_2
        ROUTER2 = pv.vars['Router_2']
        # Step 3: BR_1: Checks received Network Data and determines that it needs to send its BBR Dataset to the
        # leader to become primary BBR.
        pkts.filter_wpan_src64(BR_1).filter_coap_request('/a/sd', port=MM).must_next().must_verify("""
            thread_nwd.tlv.server_16 is not null
            and thread_nwd.tlv.service.s_data.seqno is not null
            and thread_nwd.tlv.service.s_data.rrdelay is not null
            and thread_nwd.tlv.service.s_data.mlrtimeout is not null
            """)
        pv.verify_dua_registration(ROUTER2, Dg, pbbr_eth=BR_1_ETH, pbbr_src64=BR_1)
        # Verify Host ping BBR
        pkts.filter_eth_src(Host_ETH).filter_ipv6_src_dst(Host_BGUA, BR_1_BGUA).filter_ping_request().must_next()
        pkts.filter_eth_src(BR_1_ETH).filter_ipv6_src_dst(BR_1_BGUA, Host_BGUA).filter_ping_reply().must_next()
        # Verify BR_1 ping Host
        pkts.filter_eth_src(BR_1_ETH).filter_ipv6_src_dst(BR_1_BGUA, Host_BGUA).filter_ping_request().must_next()
        pkts.filter_eth_src(Host_ETH).filter_ipv6_src_dst(Host_BGUA, BR_1_BGUA).filter_ping_reply().must_next()
        # Step 16: Host: Queries DUA, Dg, with ND-NS
        pkts.filter_eth_src(Host_ETH).filter_icmpv6_nd_ns(Dg).must_next()
        # Step 17: BR_1: Responds with a neighbor advertisement.
        pkts.filter_eth_src(BR_1_ETH).filter_icmpv6_nd_na(Dg).must_next()
# Allow running this scenario directly as a unittest script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "982ac2bcc10c6920c79eb7acb41f6c4c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 113,
"avg_line_length": 37.229166666666664,
"alnum_prop": 0.587203879873158,
"repo_name": "abtink/openthread",
"id": "63d2b08b814f7ebec2d1784bf9010820f26fdb49",
"size": "6965",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/backbone/bbr_5_11_01.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2295"
},
{
"name": "C",
"bytes": "1554371"
},
{
"name": "C++",
"bytes": "8051010"
},
{
"name": "CMake",
"bytes": "106238"
},
{
"name": "Dockerfile",
"bytes": "5901"
},
{
"name": "M4",
"bytes": "32606"
},
{
"name": "Makefile",
"bytes": "189483"
},
{
"name": "Python",
"bytes": "4542699"
},
{
"name": "Shell",
"bytes": "161238"
}
],
"symlink_target": ""
} |
import pytest
import asyncio
import os
from datetime import datetime, timedelta
from aiohttp.test_utils import loop_context
from os import environ as env
from app import create, TestConfig
from app.views import Post, Comment, parse_post_options
@pytest.fixture
def app(loop):
    # Application instance built with the test configuration.
    return create(loop, conf=TestConfig)
@pytest.yield_fixture
def loop():
    # Fresh aiohttp event loop per test; closed automatically on teardown.
    with loop_context() as loop:
        yield loop
@pytest.yield_fixture
def event_loop(loop):
    """
    This is needed for correct functioning of the test_client
    of aiohttp together with pytest.mark.asyncio pytest-asyncio decorator.
    For more info check the following link:
    https://github.com/KeepSafe/aiohttp/issues/939
    """
    # Temporarily neuter loop.close so pytest-asyncio cannot close the
    # loop while aiohttp's test_client is still using it; restore after.
    loop._close = loop.close
    loop.close = lambda: None
    yield loop
    loop.close = loop._close
@pytest.fixture
def test_client_auth(
    loop, test_client, app, fixt_auth_header,
):
    """aiohttp test client whose HTTP verbs automatically carry the auth header."""
    client_task = loop.run_until_complete(test_client(app))

    def auth_method(obj, method_name):
        """ Monkey-patch original method """
        backup_name = 'original_%s' % method_name
        # Keep the untouched method reachable under 'original_<verb>'.
        setattr(obj, backup_name, getattr(obj, method_name))

        async def wrapper(url, **kwargs):
            kwargs.update(fixt_auth_header)
            original = getattr(obj, backup_name)
            return await original(url, **kwargs)

        return wrapper

    for verb in ('get', 'post', 'delete', 'put'):
        setattr(client_task, verb, auth_method(client_task, verb))
    yield client_task
@pytest.fixture
def test_client_no_auth(loop, test_client, app):
    # Plain aiohttp test client without any Authorization header.
    client_task = loop.run_until_complete(test_client(app))
    yield client_task
@pytest.fixture
def fixt_auth_header():
    # Request kwargs adding the static test auth token.
    return {'headers': {'Authorization': 'Token TestToken'}}
@pytest.fixture
def fixt_blog_comment(fixt_blog_post):
    # One comment linked (via slug) to the fixt_blog_post fixture.
    return Comment(
        author='Comment Author', date='2016-04-05T12:52:00',
        content='Hello Comment',
        post_slug=fixt_blog_post.slug, email='john@doe.pl')
@pytest.fixture
def fixt_blog_post():
    # Minimal post with default (empty) options and no image.
    return Post(
        title='Title 1', subtitle='Sub 1', date='2010-01-01T11:11:00',
        slug='slug-1', author='P', options=parse_post_options(''),
        image='', content='Test content 1')
@pytest.fixture
def fixt_blog_post_image():
    # Same as fixt_blog_post but with a header image set.
    return Post(
        title='Title 1', subtitle='Sub 1', date='2010-01-01T11:11:00',
        slug='slug-1', author='P', options=parse_post_options(''),
        image='/static/img/test.jpg', content='Test content 1')
@pytest.fixture
def fixt_blog_post_comments_disabled():
    # Post whose options flag turns comments off.
    return Post(
        title='Title 1', subtitle='Sub 1', date='2010-01-01T11:11:00',
        slug='slug-1', author='P',
        options=parse_post_options('disable_comments'),
        image='', content='Test content 1')
@pytest.fixture
def fixt_blog_posts():
    # Two posts with distinct slugs/dates for list-view tests.
    return [
        Post(
            title='Title 1', subtitle='Sub 1', date='2010-01-01T11:11:00',
            slug='slug-1', author='P', options=parse_post_options(''),
            image='', content='Test content 1'),
        Post(
            title='Title 2', subtitle='Sub 2', date='2010-01-02T11:11:00',
            slug='slug-2', author='D', options=parse_post_options(''),
            image='', content='Test content 2'),
    ]
@pytest.fixture
def fixt_blog_posts_two_pages():
    # Six posts — enough to spill onto a second listing page
    # (presumably the page size is 5; confirm against pagination config).
    return [
        Post(
            title='Title 1', subtitle='Sub 1', date='2010-01-01T11:11:00',
            slug='slug-1', author='P', options=parse_post_options(''),
            image='', content='Test content 1'),
        Post(
            title='Title 2', subtitle='Sub 2', date='2010-01-02T11:11:00',
            slug='slug-2', author='D', options=parse_post_options(''),
            image='', content='Test content 2'),
        Post(
            title='Title 3', subtitle='Sub 3', date='2010-01-03T11:11:00',
            slug='slug-3', author='E', options=parse_post_options(''),
            image='', content='Test content 3'),
        Post(
            title='Title 4', subtitle='Sub 4', date='2010-01-04T11:11:00',
            slug='slug-4', author='Q', options=parse_post_options(''),
            image='', content='Test content 4'),
        Post(
            title='Title 5', subtitle='Sub 5', date='2010-01-05T11:11:00',
            slug='slug-5', author='T', options=parse_post_options(''),
            image='', content='Test content 5'),
        Post(
            title='Title 6', subtitle='Sub 6', date='2010-01-06T11:11:00',
            slug='slug-6', author='Y', options=parse_post_options(''),
            image='', content='Test content 6'),
    ]
class Any(object):
    """Wildcard sentinel: compares equal to absolutely every value."""

    def __eq__(self, other):
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'Any'
class AlmostSimilarDateTime(object):
    """Compares equal to any datetime within `threshold` seconds of `expected`.

    The right-hand side of the comparison may be a datetime or an
    ISO-8601 string (with or without microseconds).
    """

    def __init__(self, expected, threshold=1):
        self.expected = expected
        self.threshold = threshold

    def __eq__(self, x):
        y = self.expected
        if isinstance(x, str):
            # fromisoformat accepts timestamps both with and without the
            # fractional part; the previous strptime('%Y-%m-%dT%H:%M:%S.%f')
            # raised ValueError for strings lacking microseconds.
            x = datetime.fromisoformat(x)
        if y < x:
            x, y = y, x
        # Strict inequality: a gap of exactly `threshold` seconds is unequal.
        return y - x < timedelta(seconds=self.threshold)

    def __repr__(self):
        return 'AlmostSimilarDateTime {}s {}'.format(
            self.threshold, self.expected)

    def __ne__(self, x):
        return not self.__eq__(x)
| {
"content_hash": "71b34f1edc17e0ec7cae5749973a71b3",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 74,
"avg_line_length": 30.22099447513812,
"alnum_prop": 0.596709323583181,
"repo_name": "wzuo/blog-markdown-aio",
"id": "c64c53f272b06ad9e321bec198e9c1f6449d1a32",
"size": "5494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/app/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23017"
},
{
"name": "JavaScript",
"bytes": "46117"
},
{
"name": "Python",
"bytes": "32718"
}
],
"symlink_target": ""
} |
from openravepy import (
CollisionOptions,
CollisionOptionsStateSaver,
CollisionReport,
RaveCreateKinBody,
openrave_exception,
)
from prpy.exceptions import PrPyException
from prpy.planning.exceptions import (
CollisionPlanningError,
SelfCollisionPlanningError,
)
class SimpleRobotCollisionChecker(object):
    """RobotCollisionChecker backed by the standard OpenRAVE interface.

    Wraps a robot and its environment's collision checker. CheckCollision()
    runs an environment check followed by a self-collision check
    (short-circuiting on the first hit); VerifyCollisionFree() raises
    CollisionPlanningError or SelfCollisionPlanningError instead of
    returning a flag.
    """

    def __init__(self, robot, collision_options):
        self.robot = robot
        self.env = robot.GetEnv()
        self.checker = self.env.GetCollisionChecker()
        if self.checker is None:
            raise PrPyException('No collision checker found on environment')
        self.collision_saver = CollisionOptionsStateSaver(
            self.checker, collision_options)

    @property
    def collision_options(self):
        # Options that will be active while this checker is entered.
        return self.collision_saver.newoptions

    def __enter__(self):
        self.collision_saver.__enter__()
        return self

    def __exit__(self, type, value, traceback):
        self.collision_saver.__exit__(type, value, traceback)

    def CheckCollision(self, report=None):
        # Environment check first; self-collision only if that was clean.
        return bool(
            self.env.CheckCollision(self.robot, report=report)
            or self.robot.CheckSelfCollision(report=report))

    def VerifyCollisionFree(self):
        report = CollisionReport()
        if self.env.CheckCollision(self.robot, report=report):
            raise CollisionPlanningError.FromReport(report)
        if self.robot.CheckSelfCollision(report=report):
            raise SelfCollisionPlanningError.FromReport(report)
class BakedRobotCollisionChecker(object):
    """RobotCollisionChecker which uses a baked collision interface.
    When this RobotCollisionChecker is instantiated with a robot,
    it interfaces with a collision checker which supports the baking
    interface first implemented in or_fcl. This takes time on
    initialization to pre-allocate and optimize the underlying
    datastructures, in order to speed up the subsequent
    CheckCollision() calls (e.g. in an inner planner loop).
    Since underlying implementations do not currently return which
    links caused a collision, this checker always returns a basic
    CollisionPlanningError on collision.
    """
    def __init__(self, robot, collision_options=CollisionOptions.ActiveDOFs):
        self.robot = robot
        self.env = robot.GetEnv()
        self.checker = self.env.GetCollisionChecker()
        if self.checker is None:
            raise PrPyException('No collision checker found on environment')
        # Set lazily by __enter__; holds the baked composite check.
        self.baked_kinbody = None
        self.collision_saver = CollisionOptionsStateSaver(
            self.checker, collision_options)
    @property
    def collision_options(self):
        # Options that the saver will apply while the context is active.
        return self.collision_saver.newoptions
    def __enter__(self):
        if self.baked_kinbody is not None:
            raise PrPyException(
                'Another baked KinBody is available. Did you call __enter__'
                ' twice or forget to call __exit__?')
        # BakeGetType reports the KinBody interface name used for baking;
        # an openrave_exception here means the checker has no baking support.
        try:
            kb_type = self.checker.SendCommand('BakeGetType')
        except openrave_exception:
            raise PrPyException('Collision checker does not support baking')
        # TODO: How should we handle exceptions that are thrown below?
        self.collision_saver.__enter__()
        # This "bakes" the following Env and Self checks.
        # (after the bake, the composite check is stored in self.baked_kinbody)
        self.checker.SendCommand('BakeBegin')
        self.env.CheckCollision(self.robot)
        self.robot.CheckSelfCollision()
        self.baked_kinbody = RaveCreateKinBody(self.env, kb_type)
        if self.baked_kinbody is None:
            raise PrPyException('Failed to create baked KinBody.')
        self.checker.SendCommand('BakeEnd')
        return self
    def __exit__(self, type, value, traceback):
        if self.baked_kinbody is None:
            raise PrPyException(
                'No baked KinBody is available. Did you call __exit__'
                ' without calling __enter__ first?')
        # Drop the reference explicitly, then reset the sentinel.
        del self.baked_kinbody
        self.baked_kinbody = None
        self.collision_saver.__exit__(type, value, traceback)
    def CheckCollision(self, report=None):
        """Return True iff the baked composite check reports a collision."""
        if self.baked_kinbody is None:
            raise PrPyException(
                'No baked KinBody is available. Did you call __enter__?')
        # The baked check is performed by checking self collision on baked
        return self.checker.CheckSelfCollision(self.baked_kinbody, report)
    def VerifyCollisionFree(self):
        # Baking cannot distinguish env from self collision, so only the
        # generic CollisionPlanningError is raised.
        report = CollisionReport()
        if self.CheckCollision(report=report):
            raise CollisionPlanningError.FromReport(report)
class SimpleRobotCollisionCheckerFactory(object):
    """Factory that builds SimpleRobotCollisionChecker instances.

    The collision options are fixed at factory construction time and
    applied to every checker the factory produces.
    """
    def __init__(self, collision_options=CollisionOptions.ActiveDOFs):
        self.collision_options = collision_options

    def __call__(self, robot):
        """Return a SimpleRobotCollisionChecker bound to `robot`."""
        return SimpleRobotCollisionChecker(robot, self.collision_options)
class BakedRobotCollisionCheckerFactory(object):
    """Factory that builds BakedRobotCollisionChecker instances.

    The collision options are fixed at factory construction time and
    applied to every checker the factory produces.
    """
    def __init__(self, collision_options=CollisionOptions.ActiveDOFs):
        self.collision_options = collision_options

    def __call__(self, robot):
        """Return a BakedRobotCollisionChecker bound to `robot`."""
        return BakedRobotCollisionChecker(robot, self.collision_options)
# Default to the simple (non-baked) checker with ActiveDOFs options.
DefaultRobotCollisionCheckerFactory = SimpleRobotCollisionCheckerFactory()
| {
"content_hash": "01fe32df95af0dab1376f2cacc8a75fe",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 35.201219512195124,
"alnum_prop": 0.6847393036549454,
"repo_name": "personalrobotics/prpy",
"id": "d0685a73b125152f32cf3b04c33805805a8971a5",
"size": "7419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/prpy/collision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "188"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "585970"
}
],
"symlink_target": ""
} |
import sys
from cdrouter import CDRouter
from cdrouter.configs import Config
# Require the CDRouter base URL and API token on the command line.
if len(sys.argv) < 3:
    print('usage: <base_url> <token>')
    sys.exit(1)
base = sys.argv[1]
token = sys.argv[2]
# Create the API client bound to the given server and token.
c = CDRouter(base, token=token)
# Create a config whose contents are raw CDRouter config-file text
# (two testvar overrides for the LAN interface).
cfg = c.configs.create(Config(
    name='My Config File',
    contents="""
testvar lanIp 192.168.1.1
testvar lanMask 255.255.255.0
"""))
print('New config has ID {}'.format(cfg.id))
| {
"content_hash": "9f1c42dfa0268afd22a0721d885475f1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 44,
"avg_line_length": 18.695652173913043,
"alnum_prop": 0.6767441860465117,
"repo_name": "qacafe/cdrouter.py",
"id": "0a44dc20d7509c8c5d82fce313280b6145c672dd",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/create_config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "150"
},
{
"name": "Python",
"bytes": "409892"
},
{
"name": "Shell",
"bytes": "2426"
}
],
"symlink_target": ""
} |
from math import sqrt, log
class Node:
    """One node of the MCTS game tree.

    `wins` is always counted from the viewpoint of `playerJustMoved`.
    Construction requires a game state object exposing get_moves() and
    playerJustMoved (crashes if `state` is not supplied).
    """
    def __init__(self, move=None, parent=None, state=None):
        self.move = move            # move that led here; None at the root
        self.parentNode = parent    # None at the root
        self.childNodes = []
        self.wins = 0
        self.visits = 0
        # Moves not yet expanded into child nodes.
        self.untriedMoves = state.get_moves()
        # The only piece of game state the node retains.
        self.playerJustMoved = state.playerJustMoved

    def UCTSelectChild(self):
        """Select the child maximising the UCB1 score
        wins/visits + sqrt(2*ln(parent visits)/visits).
        Ties resolve to the later child (matches a stable sort + [-1]).
        """
        def ucb1(c):
            return c.wins / c.visits + sqrt(2 * log(self.visits) / c.visits)
        best = self.childNodes[0]
        best_score = ucb1(best)
        for contender in self.childNodes[1:]:
            score = ucb1(contender)
            if score >= best_score:
                best, best_score = contender, score
        return best

    def AddChild(self, m, s):
        """Expand move `m`: remove it from untriedMoves and attach a new
        child built from state `s`. Returns the added child node.
        """
        child = Node(move=m, parent=self, state=s)
        self.untriedMoves.remove(m)
        self.childNodes.append(child)
        return child

    def Update(self, result):
        """Record one visit with `result` extra wins (result is from the
        viewpoint of playerJustMoved)."""
        self.visits += 1
        self.wins += result

    def __repr__(self):
        return "[M:{0} W/V:{1}/{2} U:{3}]".format(
            self.move, self.wins, self.visits, self.untriedMoves)

    def TreeToString(self, indent):
        """Recursive pretty-print of this subtree, one node per line."""
        out = self.IndentString(indent) + str(self)
        for child in self.childNodes:
            out += child.TreeToString(indent + 1)
        return out

    def IndentString(self, indent):
        """Newline followed by one '| ' marker per indent level."""
        return "\n" + "| " * indent

    def ChildrenToString(self):
        """One child repr per line (flat, no recursion)."""
        s = ""
        for c in self.childNodes:
            s += str(c) + "\n"
return s | {
"content_hash": "f4563286b86e0ec36ff6c1e17ea9ca11",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 133,
"avg_line_length": 37.08474576271186,
"alnum_prop": 0.5845521023765996,
"repo_name": "crainiarc/poker-ai-planner",
"id": "7d89168dd2859c6168d8fe63b0dea5e99f66cc18",
"size": "2188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/mcts/node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "19635"
},
{
"name": "C++",
"bytes": "2057640"
},
{
"name": "Java",
"bytes": "7550"
},
{
"name": "Makefile",
"bytes": "1882"
},
{
"name": "Python",
"bytes": "72493"
}
],
"symlink_target": ""
} |
import decimal
import json
import os
import time
from datetime import datetime
import pytest
import pytz
import snowflake.connector
from snowflake.connector import (constants, errorcode, errors)
from snowflake.connector.compat import (BASE_EXCEPTION_CLASS, PY2)
def _create_warehouse(conn, db_parameters):
"""
Use the test warehouse, database and schema
"""
def exe(sql):
return conn.cursor().execute(sql)
exe("create or replace warehouse {0} warehouse_size=small, "
"warehouse_type=standard".format(db_parameters['name_wh']))
exe("use warehouse {0}".format(db_parameters['name_wh']))
exe("use {0}.{1}".format(db_parameters['database'],
db_parameters['schema']))
def _drop_warehouse(conn, db_parameters):
conn.cursor().execute("drop warehouse if exists {0}".format(
db_parameters['name_wh']
))
@pytest.fixture()
def conn(request, conn_cnx, db_parameters):
    """Fixture: create the shared test table (all column types used by the
    tests below) and register a finalizer that drops it. Returns the
    ``conn_cnx`` connection factory, not an open connection."""
    def fin():
        # Re-select the test schema before dropping, since the finalizer
        # runs on a fresh connection.
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                'use {db}.{schema}'.format(
                    db=db_parameters['database'],
                    schema=db_parameters['schema']))
            cnx.cursor().execute("drop table {name}".format(
                name=db_parameters['name']))
    request.addfinalizer(fin)
    with conn_cnx() as cnx:
        cnx.cursor().execute("""
create table {name} (
aa int,
dt date,
tm time,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(5,2),
b binary)
""".format(name=db_parameters['name']))
    return conn_cnx
def _check_results(cursor, results):
assert cursor.sfqid, 'Snowflake query id is None'
assert cursor.rowcount == 3, 'the number of records'
assert results[0] == 65432, 'the first result was wrong'
assert results[1] == 98765, 'the second result was wrong'
assert results[2] == 123456, 'the third result was wrong'
def test_insert_select(conn, db_parameters):
    """
    Inserts and selects integer data.
    Covers tuple cursors, DictCursor, and rowcount bookkeeping.
    """
    with conn() as cnx:
        c = cnx.cursor()
        try:
            c.execute(
                "insert into {name}(aa) values(123456),"
                "(98765),(65432)".format(name=db_parameters['name']))
            # Iterating an INSERT cursor yields the affected-row counts.
            cnt = 0
            for rec in c:
                cnt += int(rec[0])
            assert cnt == 3, 'wrong number of records were inserted'
            assert c.rowcount == 3, 'wrong number of records were inserted'
        finally:
            c.close()
        try:
            c = cnx.cursor()
            c.execute("select aa from {name} order by aa".format(
                name=db_parameters['name']))
            results = []
            for rec in c:
                results.append(rec[0])
            _check_results(c, results)
        finally:
            c.close()
        # Same query through DictCursor: rows are keyed by column name.
        with cnx.cursor(snowflake.connector.DictCursor) as c:
            c.execute("select aa from {name} order by aa".format(
                name=db_parameters['name']))
            results = []
            for rec in c:
                results.append(rec['AA'])
            _check_results(c, results)
def test_insert_and_select_by_separate_connection(
        conn, db_parameters):
    """
    Insert a record and select it by a separate connection.
    Verifies data is visible across connections (committed on insert).
    """
    with conn() as cnx:
        result = cnx.cursor().execute(
            "insert into {name}(aa) values({value})".format(
                name=db_parameters['name'], value='1234'))
        cnt = 0
        for rec in result:
            cnt += int(rec[0])
        assert cnt == 1, 'wrong number of records were inserted'
        assert result.rowcount == 1, 'wrong number of records were inserted'
    # Second, independent connection reading the same table.
    cnx2 = snowflake.connector.connect(
        user=db_parameters['user'],
        password=db_parameters['password'],
        host=db_parameters['host'],
        port=db_parameters['port'],
        account=db_parameters['account'],
        database=db_parameters['database'],
        schema=db_parameters['schema'],
        timezone='UTC',
        protocol='http',
    )
    _create_warehouse(cnx2, db_parameters)
    try:
        c = cnx2.cursor()
        c.execute("select aa from {name}".format(name=db_parameters['name']))
        results = []
        for rec in c:
            results.append(rec[0])
        c.close()
        assert results[0] == 1234, 'the first result was wrong'
        assert result.rowcount == 1, 'wrong number of records were selected'
    finally:
        _drop_warehouse(cnx2, db_parameters)
        cnx2.close()
def _total_milliseconds_from_timedelta(td):
"""
Returns the total number of milliseconds contained in the duration object.
"""
return (td.microseconds + (
td.seconds + td.days * 24 * 3600) * 10 ** 6) // 10 ** 3
def _total_seconds_from_timedelta(td):
"""
Returns the total number of seconds contained in the duration object.
"""
return _total_milliseconds_from_timedelta(td) // 10 ** 3
def test_insert_timestamp_select(conn, db_parameters):
    """
    Insert and get timestamp, timestamp with tz, date, and time.
    Currently the session parameter TIMEZONE is ignored.
    Also verifies cursor.description column names and type codes.
    """
    PST_TZ = "America/Los_Angeles"
    JST_TZ = "Asia/Tokyo"
    current_timestamp = datetime.utcnow()
    current_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(PST_TZ))
    current_date = current_timestamp.date()
    current_time = current_timestamp.time()
    # Same wall-clock instant, different tzinfo label.
    other_timestamp = current_timestamp.replace(tzinfo=pytz.timezone(JST_TZ))
    with conn() as cnx:
        cnx.cursor().execute("alter session set TIMEZONE=%s", (PST_TZ,))
        c = cnx.cursor()
        try:
            fmt = ("insert into {name}(aa, ts, tstz, tsntz, dt, tm) "
                   "values(%(value)s,%(ts)s, %(tstz)s, %(tsntz)s, "
                   "%(dt)s, %(tm)s)")
            c.execute(fmt.format(name=db_parameters['name']), {
                'value': 1234,
                'ts': current_timestamp,
                'tstz': other_timestamp,
                'tsntz': current_timestamp,
                'dt': current_date,
                'tm': current_time
            })
            cnt = 0
            for rec in c:
                cnt += int(rec[0])
            assert cnt == 1, 'wrong number of records were inserted'
            assert c.rowcount == 1, 'wrong number of records were selected'
        finally:
            c.close()
    # Read back via a separate connection pinned to UTC.
    cnx2 = snowflake.connector.connect(
        user=db_parameters['user'],
        password=db_parameters['password'],
        host=db_parameters['host'],
        port=db_parameters['port'],
        account=db_parameters['account'],
        database=db_parameters['database'],
        schema=db_parameters['schema'],
        timezone='UTC',
        protocol='http'
    )
    _create_warehouse(cnx2, db_parameters)
    try:
        c = cnx2.cursor()
        c.execute("select aa, ts, tstz, tsntz, dt, tm from {name}".format(
            name=db_parameters['name']))
        result_numeric_value = []
        result_timestamp_value = []
        result_other_timestamp_value = []
        result_ntz_timestamp_value = []
        result_date_value = []
        result_time_value = []
        for (aa, ts, tstz, tsntz, dt, tm) in c:
            result_numeric_value.append(aa)
            result_timestamp_value.append(ts)
            result_other_timestamp_value.append(tstz)
            result_ntz_timestamp_value.append(tsntz)
            result_date_value.append(dt)
            result_time_value.append(tm)
        c.close()
        assert result_numeric_value[0] == 1234, \
            'the integer result was wrong'
        # Compare with millisecond granularity rather than equality,
        # tolerating sub-ms truncation on the round trip.
        td_diff = _total_milliseconds_from_timedelta(
            current_timestamp - result_timestamp_value[0])
        assert td_diff == 0, 'the timestamp result was wrong'
        td_diff = _total_milliseconds_from_timedelta(
            other_timestamp - result_other_timestamp_value[0])
        assert td_diff == 0, 'the other timestamp result was wrong'
        td_diff = _total_milliseconds_from_timedelta(
            current_timestamp.replace(tzinfo=None) -
            result_ntz_timestamp_value[0])
        assert td_diff == 0, 'the other timestamp result was wrong'
        assert current_date == result_date_value[0], \
            'the date result was wrong'
        assert current_time == result_time_value[0], \
            'the time result was wrong'
        desc = c.description
        assert len(desc) == 6, 'invalid number of column meta data'
        assert desc[0][0].upper() == 'AA', 'invalid column name'
        assert desc[1][0].upper() == 'TS', 'invalid column name'
        assert desc[2][0].upper() == 'TSTZ', 'invalid column name'
        assert desc[3][0].upper() == 'TSNTZ', 'invalid column name'
        assert desc[4][0].upper() == 'DT', 'invalid column name'
        assert desc[5][0].upper() == 'TM', 'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[0][1]] == 'FIXED', \
            'invalid column name: {0}'.format(
                constants.FIELD_ID_TO_NAME[desc[0][1]])
        assert constants.FIELD_ID_TO_NAME[desc[1][1]] == 'TIMESTAMP_LTZ', \
            'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[2][1]] == 'TIMESTAMP_TZ', \
            'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[3][1]] == 'TIMESTAMP_NTZ', \
            'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[4][1]] == 'DATE', \
            'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[5][1]] == 'TIME', \
            'invalid column name'
    finally:
        _drop_warehouse(cnx2, db_parameters)
        cnx2.close()
def test_insert_timestamp_ltz(conn, db_parameters):
    """
    Inserts and retrieves timestamp ltz, with the session TIMEZONE
    synchronized to the tzinfo of the inserted value.
    """
    tzstr = 'America/New_York'
    # sync with the session parameter
    with conn() as cnx:
        cnx.cursor().execute(
            "alter session set timezone='{tzstr}'".format(tzstr=tzstr))
        current_time = datetime.now()
        current_time = current_time.replace(tzinfo=pytz.timezone(tzstr))
        c = cnx.cursor()
        try:
            fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
            c.execute(fmt.format(name=db_parameters['name']), {
                'value': 8765,
                'ts': current_time,
            })
            cnt = 0
            for rec in c:
                cnt += int(rec[0])
            assert cnt == 1, 'wrong number of records were inserted'
        finally:
            c.close()
        try:
            c = cnx.cursor()
            c.execute("select aa,tsltz from {name}".format(
                name=db_parameters['name']))
            result_numeric_value = []
            result_timestamp_value = []
            for (aa, ts) in c:
                result_numeric_value.append(aa)
                result_timestamp_value.append(ts)
            # Millisecond-granularity comparison of the round trip.
            td_diff = _total_milliseconds_from_timedelta(
                current_time - result_timestamp_value[0])
            assert td_diff == 0, 'the first result was wrong'
        finally:
            c.close()
def test_struct_time(conn, db_parameters):
    """
    Binds a time.struct_time object for updating a timestamp column,
    then verifies each datetime field after converting back to UTC.
    """
    tzstr = 'America/New_York'
    # Bind interpretation depends on the process TZ; restore to UTC below.
    os.environ['TZ'] = tzstr
    time.tzset()
    test_time = time.strptime("30 Sep 01 11:20:30", "%d %b %y %H:%M:%S")
    with conn() as cnx:
        c = cnx.cursor()
        try:
            fmt = "insert into {name}(aa, tsltz) values(%(value)s,%(ts)s)"
            c.execute(fmt.format(name=db_parameters['name']), {
                'value': 87654,
                'ts': test_time,
            })
            cnt = 0
            for rec in c:
                cnt += int(rec[0])
        finally:
            c.close()
            os.environ['TZ'] = 'UTC'
            time.tzset()
        assert cnt == 1, 'wrong number of records were inserted'
        try:
            result = cnx.cursor().execute(
                "select aa, tsltz from {name}".format(
                    name=db_parameters['name']))
            for (aa, tsltz) in result:
                pass
            # Normalize to a naive UTC-ish datetime before field comparison.
            tsltz -= tsltz.tzinfo.utcoffset(tsltz)
            assert test_time.tm_year == tsltz.year, "Year didn't match"
            assert test_time.tm_mon == tsltz.month, "Month didn't match"
            assert test_time.tm_mday == tsltz.day, "Day didn't match"
            assert test_time.tm_hour == tsltz.hour, "Hour didn't match"
            assert test_time.tm_min == tsltz.minute, "Minute didn't match"
            assert test_time.tm_sec == tsltz.second, "Second didn't match"
        finally:
            os.environ['TZ'] = 'UTC'
            time.tzset()
@pytest.mark.skipif(PY2, reason="""
Binary not supported in Python 2 connector.
""")
def test_insert_binary_select(conn, db_parameters):
    """
    Insert and get a binary value, verifying the round-tripped bytes and
    the BINARY type code in cursor.description.
    """
    value = b'\x00\xFF\xA1\xB2\xC3'
    with conn() as cnx:
        c = cnx.cursor()
        try:
            fmt = ("insert into {name}(b) values(%(b)s)")
            c.execute(fmt.format(name=db_parameters['name']), {'b': value})
            count = sum(int(rec[0]) for rec in c)
            assert count == 1, 'wrong number of records were inserted'
            assert c.rowcount == 1, 'wrong number of records were selected'
        finally:
            c.close()
    # Read back on a separate connection.
    cnx2 = snowflake.connector.connect(
        user=db_parameters['user'],
        password=db_parameters['password'],
        host=db_parameters['host'],
        port=db_parameters['port'],
        account=db_parameters['account'],
        database=db_parameters['database'],
        schema=db_parameters['schema'],
        protocol='http'
    )
    _create_warehouse(cnx2, db_parameters)
    try:
        c = cnx2.cursor()
        c.execute("select b from {name}".format(name=db_parameters['name']))
        results = [b for (b,) in c]
        assert value == results[0], 'the binary result was wrong'
        desc = c.description
        assert len(desc) == 1, 'invalid number of column meta data'
        assert desc[0][0].upper() == 'B', 'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[0][1]] == 'BINARY', \
            'invalid column name'
    finally:
        _drop_warehouse(cnx2, db_parameters)
        cnx2.close()
def test_insert_binary_select_with_bytearray(conn, db_parameters):
    """
    Insert and get a binary value using the bytearray type.
    The value comes back as immutable bytes, hence the bytes() comparison.
    """
    value = bytearray(b'\x00\xFF\xA1\xB2\xC3')
    with conn() as cnx:
        c = cnx.cursor()
        try:
            fmt = ("insert into {name}(b) values(%(b)s)")
            c.execute(fmt.format(name=db_parameters['name']), {'b': value})
            count = sum(int(rec[0]) for rec in c)
            assert count == 1, 'wrong number of records were inserted'
            assert c.rowcount == 1, 'wrong number of records were selected'
        finally:
            c.close()
    # Read back on a separate connection.
    cnx2 = snowflake.connector.connect(
        user=db_parameters['user'],
        password=db_parameters['password'],
        host=db_parameters['host'],
        port=db_parameters['port'],
        account=db_parameters['account'],
        database=db_parameters['database'],
        schema=db_parameters['schema'],
        protocol='http'
    )
    _create_warehouse(cnx2, db_parameters)
    try:
        c = cnx2.cursor()
        c.execute("select b from {name}".format(name=db_parameters['name']))
        results = [b for (b,) in c]
        assert bytes(value) == results[0], 'the binary result was wrong'
        desc = c.description
        assert len(desc) == 1, 'invalid number of column meta data'
        assert desc[0][0].upper() == 'B', 'invalid column name'
        assert constants.FIELD_ID_TO_NAME[desc[0][1]] == 'BINARY', \
            'invalid column name'
    finally:
        _drop_warehouse(cnx2, db_parameters)
        cnx2.close()
def test_variant(conn, db_parameters):
    """Variant column including a JSON object: insert through
    parse_json() and verify the JSON round trip.
    Uses a dedicated table that is dropped in the finally block.
    """
    name_variant = db_parameters['name'] + "_variant"
    with conn() as cnx:
        cnx.cursor().execute("""
create table {name} (
created_at timestamp, data variant)
""".format(name=name_variant))
    try:
        with conn() as cnx:
            current_time = datetime.now()
            c = cnx.cursor()
            try:
                fmt = ("insert into {name}(created_at, data) "
                       "select column1, parse_json(column2) "
                       "from values(%(created_at)s, %(data)s)")
                c.execute(fmt.format(name=name_variant), {
                    'created_at': current_time,
                    'data': ('{"SESSION-PARAMETERS":{'
                             '"TIMEZONE":"UTC", "SPECIAL_FLAG":true}}')
                })
                cnt = 0
                for rec in c:
                    cnt += int(rec[0])
                assert cnt == 1, 'wrong number of records were inserted'
                assert c.rowcount == 1, \
                    'wrong number of records were inserted'
            finally:
                c.close()
            result = cnx.cursor().execute(
                "select created_at, data from {name}".format(
                    name=name_variant))
            _, data = result.fetchone()
            # Variant comes back as a JSON string.
            data = json.loads(data)
            assert data['SESSION-PARAMETERS']['SPECIAL_FLAG'], \
                ("JSON data should be parsed properly. "
                 "Invalid JSON data")
    finally:
        with conn() as cnx:
            cnx.cursor().execute(
                "drop table {name}".format(name=name_variant))
def test_callproc(conn_cnx):
    """Callproc. nop as of now: the connector raises NotSupportedError.
    """
    with conn_cnx() as cnx:
        with pytest.raises(errors.NotSupportedError):
            cnx.cursor().callproc("whatever the stored procedure")
def test_invalid_bind_data_type(conn_cnx):
    """Invalid bind data type: a list bind value must raise
    ProgrammingError.
    """
    with conn_cnx() as cnx:
        with pytest.raises(errors.ProgrammingError):
            cnx.cursor().execute(
                "select 1 from dual where 1=%s", ([1, 2, 3],))
def test_timeout_query(conn_cnx):
    """Timeout: a long-running query with timeout=1 must be cancelled
    and surface ProgrammingError with errno 604 (query cancelled).
    Runs twice to confirm the cursor/connection stays usable.
    """
    with conn_cnx() as cnx:
        c = cnx.cursor()
        try:
            c.execute(
                'select seq8() as c1 '
                'from table(generator(rowCount => 100000001))',
                timeout=1)
            raise Exception("Must be canceled")
        except BASE_EXCEPTION_CLASS as err:
            assert isinstance(err, errors.ProgrammingError), \
                "Programming Error Exception"
            assert err.errno == 604, "Invalid error code"
        finally:
            c.close()
        c = cnx.cursor()
        try:
            c.execute(
                'select seq8() as c1 '
                'from table(generator(rowCount => 100000002))',
                timeout=1)
            raise Exception("Must be canceled")
        except BASE_EXCEPTION_CLASS as err:
            assert isinstance(err, errors.ProgrammingError), \
                "Programming Error Exception"
            assert err.errno == 604, "Invalid error code"
        finally:
            c.close()
def test_executemany(conn, db_parameters):
    """Executes many statements. Client binding is supported by either
    dict or list data types.
    NOTE the binding data type is dict and tuple, respectively
    """
    with conn() as cnx:
        c = cnx.cursor()
        fmt = 'insert into {name}(aa) values(%(value)s)'.format(
            name=db_parameters['name'])
        c.executemany(fmt, [
            {'value': '1234'},
            {'value': '234'},
            {'value': '34'},
            {'value': '4'},
        ])
        # Iterating after executemany yields per-statement row counts.
        cnt = 0
        for rec in c:
            cnt += int(rec[0])
        assert cnt == 4, 'number of records'
        assert c.rowcount == 4, 'wrong number of records were inserted'
        c.close()
        c = cnx.cursor()
        fmt = 'insert into {name}(aa) values(%s)'.format(
            name=db_parameters['name'])
        c.executemany(fmt, [
            (12345,),
            (1234,),
            (234,),
            (34,),
            (4,),
        ])
        rec = c.fetchone()
        assert rec[0] == 5, 'number of records'
        assert c.rowcount == 5, 'wrong number of records were inserted'
        c.close()
def test_closed_cursor(conn, db_parameters):
    """
    Attempt to use the closed cursor. It should raise an error with
    errno ER_CURSOR_IS_CLOSED.
    NOTE the binding data type is scalar
    """
    with conn() as cnx:
        c = cnx.cursor()
        fmt = 'insert into {name}(aa) values(%s)'.format(
            name=db_parameters['name'])
        c.executemany(fmt, [
            12345,
            1234,
            234,
            34,
            4,
        ])
        rec = c.fetchone()
        assert rec[0] == 5, 'number of records'
        assert c.rowcount == 5, 'number of records'
        c.close()
        # Any execute() on a closed cursor must fail.
        fmt = 'select aa from {name}'.format(name=db_parameters['name'])
        try:
            c.execute(fmt)
            raise Exception('should fail as the cursor was closed.')
        except snowflake.connector.Error as err:
            assert err.errno == errorcode.ER_CURSOR_IS_CLOSED
def test_fetchmany(conn, db_parameters):
    """
    Fetches many rows in successive batches, including a final batch
    larger than the remaining rows and one past exhaustion.
    """
    with conn() as cnx:
        c = cnx.cursor()
        fmt = 'insert into {name}(aa) values(%(value)s)'.format(
            name=db_parameters['name'])
        c.executemany(fmt, [
            {'value': '3456789'},
            {'value': '234567'},
            {'value': '1234'},
            {'value': '234'},
            {'value': '34'},
            {'value': '4'},
        ])
        cnt = 0
        for rec in c:
            cnt += int(rec[0])
        assert cnt == 6, 'number of records'
        assert c.rowcount == 6, 'number of records'
        c.close()
        c = cnx.cursor()
        fmt = 'select aa from {name} order by aa desc'.format(
            name=db_parameters['name'])
        c.execute(fmt)
        rows = c.fetchmany(2)
        assert len(rows) == 2, 'The number of records'
        assert rows[1][0] == 234567, 'The second record'
        rows = c.fetchmany(1)
        assert len(rows) == 1, 'The number of records'
        assert rows[0][0] == 1234, 'The first record'
        # Only 3 rows remain; fetchmany(5) returns just those.
        rows = c.fetchmany(5)
        assert len(rows) == 3, 'The number of records'
        assert rows[-1][0] == 4, 'The last record'
        # Exhausted result set yields an empty list.
        rows = c.fetchmany(15)
        assert len(rows) == 0, 'The number of records'
        c.close()
def test_process_params(conn, db_parameters):
    """Binds variables for insert and other queries, using both
    dict (%(name)s) and positional (%s) parameter styles.
    """
    with conn() as cnx:
        c = cnx.cursor()
        fmt = 'insert into {name}(aa) values(%(value)s)'.format(
            name=db_parameters['name'])
        c.executemany(fmt, [
            {'value': '3456789'},
            {'value': '234567'},
            {'value': '1234'},
            {'value': '234'},
            {'value': '34'},
            {'value': '4'},
        ])
        cnt = 0
        for rec in c:
            cnt += int(rec[0])
        c.close()
        assert cnt == 6, 'number of records'
        # Dict-style bind in a WHERE clause.
        fmt = 'select count(aa) from {name} where aa > %(value)s'.format(
            name=db_parameters['name'])
        c = cnx.cursor()
        c.execute(fmt, {'value': 1233})
        for (cnt,) in c:
            pass
        assert cnt == 3, 'the number of records'
        c.close()
        # Positional-style bind in a WHERE clause.
        fmt = 'select count(aa) from {name} where aa > %s'.format(
            name=db_parameters['name'])
        c = cnx.cursor()
        c.execute(fmt, (1234,))
        for (cnt,) in c:
            pass
        assert cnt == 2, 'the number of records'
        c.close()
def test_real_decimal(conn, db_parameters):
    """Uses Real (float) and Decimal types and checks they round-trip
    through both tuple cursors and DictCursor.
    """
    with conn() as cnx:
        c = cnx.cursor()
        fmt = ('insert into {name}(aa, pct, ratio) '
               'values(%s,%s,%s)').format(
            name=db_parameters['name'])
        c.execute(fmt, (9876, 12.3, decimal.Decimal('23.4')))
        for (cnt,) in c:
            pass
        assert cnt == 1, 'the number of records'
        c.close()
        c = cnx.cursor()
        fmt = 'select aa, pct, ratio from {name}'.format(
            name=db_parameters['name'])
        c.execute(fmt)
        for (aa, pct, ratio) in c:
            pass
        assert aa == 9876, 'the integer value'
        assert pct == 12.3, 'the float value'
        assert ratio == decimal.Decimal('23.4'), 'the decimal value'
        c.close()
        # Same values through DictCursor.
        with cnx.cursor(snowflake.connector.DictCursor) as c:
            fmt = 'select aa, pct, ratio from {name}'.format(
                name=db_parameters['name'])
            c.execute(fmt)
            rec = c.fetchone()
            assert rec['AA'] == 9876, 'the integer value'
            assert rec['PCT'] == 12.3, 'the float value'
            assert rec['RATIO'] == decimal.Decimal('23.4'), 'the decimal value'
def test_none_errorhandler(conn_testaccount):
    """
    Assigning None as the errorhandler for a Cursor must raise
    ProgrammingError.
    """
    c = conn_testaccount.cursor()
    with pytest.raises(errors.ProgrammingError):
        c.errorhandler = None
def test_nope_errorhandler(conn_testaccount):
    """
    NOOP errorhandler for Cursor: failing statements do not raise, and
    only the first error is recorded in cursor.messages.
    """
    def user_errorhandler(connection, cursor, errorclass, errorvalue):
        # Swallow all errors; the connector records them in c.messages.
        pass
    c = conn_testaccount.cursor()
    c.errorhandler = user_errorhandler
    c.execute("select * foooooo never_exists_table")
    c.execute("select * barrrrr never_exists_table")
    c.execute("select * daaaaaa never_exists_table")
    assert c.messages[0][0] == errors.ProgrammingError, \
        'One error was recorded'
    assert len(c.messages) == 1, 'should be one error'
def test_binding_negative(conn_cnx, db_parameters):
    """
    Negative binding tests: too many bind values, an empty bind tuple,
    and an unsupported list bind value.
    """
    with conn_cnx() as cnx:
        with pytest.raises(TypeError):
            cnx.cursor().execute(
                "INSERT INTO {name}(aa) VALUES(%s)".format(
                    name=db_parameters['name']), (1, 2, 3))
        with pytest.raises(errors.ProgrammingError):
            cnx.cursor().execute(
                "INSERT INTO {name}(aa) VALUES(%s)".format(
                    name=db_parameters['name']), ())
        with pytest.raises(errors.ProgrammingError):
            cnx.cursor().execute(
                "INSERT INTO {name}(aa) VALUES(%s)".format(
                    name=db_parameters['name']), (['a'],))
def test_execute_after_close(conn_testaccount):
    """
    SNOW-13588: raises an error if executing after the connection is closed
    """
    cursor = conn_testaccount.cursor()
    conn_testaccount.close()
    with pytest.raises(errors.Error):
        cursor.execute('show tables')
def test_cancel_query(conn_cnx):
    """A query exceeding its timeout must be cancelled and raise
    ProgrammingError."""
    with conn_cnx() as cnx:
        # run one query first to set the client API version to V2
        sql = "select count(*) from table(generator(timelimit=>1))"
        cnx.cursor().execute(sql)
        # cancel the query.
        sql = "select count(*) from table(generator(timelimit=>1000))"
        with pytest.raises(errors.ProgrammingError):
            cnx.cursor().execute(sql, timeout=1)
def test_multi_table_insert(conn, db_parameters):
    """INSERT ALL into two target tables: rowcount must total the rows
    written across both targets (3 source rows x 2 tables = 6)."""
    try:
        with conn() as cnx:
            cur = cnx.cursor()
            cur.execute("""
INSERT INTO {name}(aa) VALUES(1234),(9876),(2345)
""".format(name=db_parameters['name']))
            assert cur.rowcount == 3, 'the number of records'
            cur.execute("""
CREATE OR REPLACE TABLE {name}_foo (aa_foo int)
""".format(name=db_parameters['name']))
            cur.execute("""
CREATE OR REPLACE TABLE {name}_bar (aa_bar int)
""".format(name=db_parameters['name']))
            cur.execute("""
INSERT ALL
INTO {name}_foo(aa_foo) VALUES(aa)
INTO {name}_bar(aa_bar) VALUES(aa)
SELECT aa FROM {name}
""".format(name=db_parameters['name']))
            assert cur.rowcount == 6
    finally:
        # Drop the auxiliary tables created above.
        with conn() as cnx:
            cnx.cursor().execute("""
DROP TABLE IF EXISTS {name}_foo
""".format(name=db_parameters['name']))
            cnx.cursor().execute("""
DROP TABLE IF EXISTS {name}_bar
""".format(name=db_parameters['name']))
@pytest.mark.skipif(True, reason="""
Negative test case.
""")
def test_fetch_before_execute(conn_testaccount):
    """
    SNOW-13574: fetch before execute must raise DataError.
    (Currently skipped unconditionally.)
    """
    cursor = conn_testaccount.cursor()
    with pytest.raises(errors.DataError):
        cursor.fetchone()
def test_close_twice(conn_testaccount):
    """Closing a connection twice must be a no-op, not an error."""
    conn_testaccount.close()
    conn_testaccount.close()
def test_fetch_out_of_range_timestamp_value(conn):
    """A timestamp beyond Python's datetime range (year 12345) must raise
    InterfaceError at fetch time, not at execute time."""
    with conn() as cnx:
        cur = cnx.cursor()
        cur.execute("""
select '12345-01-02'::timestamp_ntz
""")
        with pytest.raises(errors.InterfaceError):
            cur.fetchone()
| {
"content_hash": "d9c560ca769d1605db108a04a244691f",
"timestamp": "",
"source": "github",
"line_count": 883,
"max_line_length": 79,
"avg_line_length": 32.58776896942242,
"alnum_prop": 0.5490877497827976,
"repo_name": "mayfield/snowflake-connector-python",
"id": "24cef78870d7ea31e17d442d195b2780f2932469",
"size": "28896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_cursor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "617787"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import pytest
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PostBuildPluginsRunner
from atomic_reactor.plugins.post_tag_and_push import TagAndPushPlugin
from atomic_reactor.util import ImageName
from tests.constants import LOCALHOST_REGISTRY, TEST_IMAGE, INPUT_IMAGE, MOCK, DOCKER0_REGISTRY
import json
import os.path
from tempfile import mkdtemp
if MOCK:
import docker
from flexmock import flexmock
from tests.docker_mock import mock_docker
DIGEST1 = 'sha256:28b64a8b29fd2723703bb17acf907cd66898440270e536992b937899a4647414'
PUSH_LOGS_1_10 = [
b'{"status":"The push refers to a repository [localhost:5000/busybox]"}',
b'{"status":"Preparing","progressDetail":{},"id":"5f70bf18a086"}',
b'{"status":"Preparing","progressDetail":{},"id":"9508eff2c687"}',
b'{"status":"Pushing","progressDetail":{"current":721920,"total":1113436},"progress":"[================================\\u003e ] 721.9 kB/1.113 MB","id":"9508eff2c687"}',
b'{"status":"Pushing","progressDetail":{"current":1024},"progress":"1.024 kB","id":"5f70bf18a086"}',
b'{"status":"Pushing","progressDetail":{"current":820224,"total":1113436},"progress":"[====================================\\u003e ] 820.2 kB/1.113 MB","id":"9508eff2c687"}',
b'{"status":"Pushed","progressDetail":{},"id":"5f70bf18a086"}',
b'{"status":"Pushed","progressDetail":{},"id":"5f70bf18a086"}',
b'{"status":"Pushing","progressDetail":{"current":1300992,"total":1113436},"progress":"[==================================================\\u003e] 1.301 MB","id":"9508eff2c687"}',
b'{"status":"Pushing","progressDetail":{"current":1310720,"total":1113436},"progress":"[==================================================\\u003e] 1.311 MB","id":"9508eff2c687"}',
b'{"status":"Pushed","progressDetail":{},"id":"9508eff2c687"}',
b'{"status":"Pushed","progressDetail":{},"id":"9508eff2c687"}',
b'{"status":"latest: digest: ' + DIGEST1.encode('utf-8') + b' size: 1920"}',
b'{"progressDetail":{},"aux":{"Tag":"latest","Digest":"' + DIGEST1.encode('utf-8') + b'","Size":1920}}' ]
PUSH_LOGS_1_10_NOT_IN_STATUS = list(PUSH_LOGS_1_10)
del PUSH_LOGS_1_10_NOT_IN_STATUS[-2]
PUSH_LOGS_1_9 = [
b'{"status":"The push refers to a repository [172.17.42.1:5000/ns/test-image2] (len: 1)"}',
b'{"status":"Buffering to Disk","progressDetail":{},"id":"83bca0dcfd1b"}',
b'{"status":"Pushing","progressDetail":{"current":1,"total":32},"progress":"[=\\u003e ] 1 B/32 B","id":"83bca0dcfd1b"}',
b'{"status":"Pushing","progressDetail":{"current":66813953,"total":66944370},"progress":"[=================================================\\u003e ] 66.81 MB/66.94 MB","id":"ded7cd95e059"}',
b'{"status":"Pushing","progressDetail":{"current":66944370,"total":66944370},"progress":"[==================================================\\u003e] 66.94 MB/66.94 MB","id":"ded7cd95e059"}',
b'{"status":"Image successfully pushed","progressDetail":{},"id":"ded7cd95e059"}',
b'{"status":"Image already exists","progressDetail":{},"id":"48ecf305d2cf"}',
b'{"status":"Digest: ' + DIGEST1.encode('utf-8') + b'"}']
PUSH_LOGS_1_X = [ # don't remember which version does this
b'{"status":"The push refers to a repository [172.17.42.1:5000/ns/test-image2]"}',
b'{"status":"13cde7f2a483: Pushed "}',
b'{"status":"7.1-23: digest: ' + DIGEST1.encode('utf-8') + b' size: 1539"}']
PUSH_ERROR_LOGS = [
b'{"status":"The push refers to a repository [xyz/abc] (len: 1)"}\r\n',
b'{"errorDetail":{"message":"error message detail"},"error":"error message"}',
]
class Y(object):
    """Empty attribute holder; an instance serves as the fake build source."""
    pass
class X(object):
    """Minimal stand-in for the workflow 'builder' object the plugin reads."""
    image_id = INPUT_IMAGE  # id of the image that was "built"
    source = Y()  # fake source; attributes filled in below
    source.dockerfile_path = None
    source.path = None
    base_image = ImageName(repo="qwe", tag="asd")  # arbitrary parent image
@pytest.mark.parametrize("use_secret", [
    True,
    False,
])
@pytest.mark.parametrize(("image_name", "logs", "should_raise"), [
    (TEST_IMAGE, PUSH_LOGS_1_X, False),
    (TEST_IMAGE, PUSH_LOGS_1_9, False),
    (TEST_IMAGE, PUSH_LOGS_1_10, False),
    (TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, False),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_X, True),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_9, True),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10, True),
    (DOCKER0_REGISTRY + '/' + TEST_IMAGE, PUSH_LOGS_1_10_NOT_IN_STATUS, True),
    (TEST_IMAGE, PUSH_ERROR_LOGS, True),
])
def test_tag_and_push_plugin(tmpdir, image_name, logs, should_raise, use_secret):
    """Run TagAndPushPlugin end-to-end against canned docker push logs.

    Pushes to DOCKER0_REGISTRY and pushes yielding PUSH_ERROR_LOGS are
    expected to fail; successful runs must record DIGEST1 for the image.
    ``use_secret`` additionally exercises the .dockercfg login path.
    """
    if MOCK:
        mock_docker()
        # Make docker.Client.push replay the canned log lines and make login
        # always succeed.
        flexmock(docker.Client, push=lambda iid, **kwargs: iter(logs),
                 login=lambda username, registry, dockercfg_path: {'Status': 'Login Succeeded'})
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow({"provider": "git", "uri": "asd"}, TEST_IMAGE)
    workflow.tag_conf.add_primary_image(image_name)
    setattr(workflow, 'builder', X)
    secret_path = None
    if use_secret:
        # Write a throwaway .dockercfg so the plugin exercises its
        # registry-secret handling.
        temp_dir = mkdtemp()
        with open(os.path.join(temp_dir, ".dockercfg"), "w+") as dockerconfig:
            dockerconfig_contents = {
                LOCALHOST_REGISTRY: {
                    "username": "user", "email": "test@example.com", "password": "mypassword"}}
            dockerconfig.write(json.dumps(dockerconfig_contents))
            dockerconfig.flush()
            secret_path = temp_dir
    runner = PostBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': TagAndPushPlugin.key,
            'args': {
                'registries': {
                    LOCALHOST_REGISTRY: {
                        'insecure': True,
                        'secret': secret_path
                    }
                }
            },
        }]
    )
    if should_raise:
        with pytest.raises(Exception):
            runner.run()
    else:
        output = runner.run()
        image = output[TagAndPushPlugin.key][0]
        tasker.remove_image(image)
        assert len(workflow.push_conf.docker_registries) > 0
        if MOCK:
            # we only test this when mocking docker because we don't expect
            # running actual docker against v2 registry
            assert workflow.push_conf.docker_registries[0].digests[image_name] == DIGEST1
@pytest.mark.parametrize("logs", [
    PUSH_LOGS_1_X,
    PUSH_LOGS_1_9,
    PUSH_LOGS_1_10,
    PUSH_LOGS_1_10_NOT_IN_STATUS
])
def test_extract_digest(logs):
    """extract_digest must recover DIGEST1 from every known log format."""
    parsed = []
    for line in logs:
        parsed.append(json.loads(line.decode('utf-8')))
    assert TagAndPushPlugin.extract_digest(parsed) == DIGEST1
@pytest.mark.parametrize("tag,should_succeed", [
    ('latest', True),
    ('earliest', False),
])
def test_extract_digest_verify_tag(tag, should_succeed):
    """Digest extraction honours the tag filter: only 'latest' matches."""
    parsed = [json.loads(entry.decode('utf-8'))
              for entry in PUSH_LOGS_1_10_NOT_IN_STATUS]
    if should_succeed:
        assert TagAndPushPlugin.extract_digest(parsed, tag) == DIGEST1
    else:
        assert TagAndPushPlugin.extract_digest(parsed, tag) is None
| {
"content_hash": "4a2b0a80e0bf4f503bce504230dc00d2",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 194,
"avg_line_length": 43.076470588235296,
"alnum_prop": 0.6009832036050798,
"repo_name": "jpopelka/atomic-reactor",
"id": "4f4c3e01e81ef7514ff902c33b8c0e7e3f11b75b",
"size": "7323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_tag_and_push.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "570871"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
} |
import os
import signal
import asyncio
import logging
import threading
import faulthandler
logger = logging.getLogger(__name__)
# Process-wide event loop and the thread driving it; both populated lazily
# by initloop().
_glob_loop = None
_glob_thrd = None
def _asynciostacks(*args, **kwargs):  # pragma: no cover
    '''
    Signal handler: dump the stack of every asyncio task on the global loop,
    then every thread's stack via faulthandler.
    '''
    banner = 80 * '*'
    print(banner)
    print('Asyncio tasks stacks:')
    for task in asyncio.all_tasks(_glob_loop):
        task.print_stack()
    print(banner)
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(banner)
def _threadstacks(*args, **kwargs):  # pragma: no cover
    '''
    Signal handler: dump the current stack of every thread via faulthandler.
    '''
    banner = 80 * '*'
    print(banner)
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(banner)
# SIGUSR1 dumps thread stacks; SIGUSR2 additionally dumps asyncio task
# stacks.  NOTE(review): SIGUSR1/SIGUSR2 are POSIX-only signals.
signal.signal(signal.SIGUSR1, _threadstacks)
signal.signal(signal.SIGUSR2, _asynciostacks)
def initloop():
    '''
    Return the process-wide asyncio event loop, creating it on first use.

    If the caller is already inside a running loop, that loop (and the
    current thread) is adopted as the global one.  Otherwise a fresh loop is
    created and run forever on a daemon thread named "SynLoop".
    '''
    global _glob_loop
    global _glob_thrd
    # if there's no global loop....
    if _glob_loop is None:
        # check if it's us....
        try:
            _glob_loop = asyncio.get_running_loop()
            # if we get here, it's us!
            _glob_thrd = threading.current_thread()
            # Enable debug and greedy coro collection
            setGreedCoro(_glob_loop)
        except RuntimeError:
            # No running loop: create one and drive it on a daemon thread.
            _glob_loop = asyncio.new_event_loop()
            setGreedCoro(_glob_loop)
            _glob_thrd = threading.Thread(target=_glob_loop.run_forever, name='SynLoop', daemon=True)
            _glob_thrd.start()
    return _glob_loop
def setGreedCoro(loop: asyncio.AbstractEventLoop):
    '''
    Enable event-loop debug mode and tune the slow-callback threshold when
    the SYN_GREEDY_CORO environment variable is set; otherwise do nothing.
    '''
    greedy_threshold = os.environ.get('SYN_GREEDY_CORO')
    if greedy_threshold is None:
        return
    # pragma: no cover - only reached when SYN_GREEDY_CORO is exported
    logger.info(f'Setting ioloop.slow_callback_duration to {greedy_threshold}')
    loop.set_debug(True)
    loop.slow_callback_duration = float(greedy_threshold)
def iAmLoop():
    '''
    Return True when the caller is executing on the global loop thread.
    '''
    initloop()
    return _glob_thrd == threading.current_thread()
def sync(coro, timeout=None):
    '''
    Schedule a coroutine to run on the global loop and return its result.

    Args:
        coro (coroutine): The coroutine instance.
        timeout (float): Seconds to wait for the result (None waits forever).

    Notes:
        This API is thread safe and should only be called by non-loop threads.
    '''
    future = asyncio.run_coroutine_threadsafe(coro, initloop())
    return future.result(timeout)
def synchelp(f):
    '''
    The synchelp decorator allows the transparent execution of
    a coroutine using the global loop from a thread other than
    the event loop.  In both use cases, the actual work is done
    by the global event loop.

    Examples:
        Use as a decorator::

            @s_glob.synchelp
            async def stuff(x, y):
                await dostuff()

        Calling the stuff function as regular async code using the standard
        await syntax::

            valu = await stuff(x, y)

        Calling the stuff function as regular sync code outside of the event
        loop thread::

            valu = stuff(x, y)
    '''
    # Function-scope import keeps this block self-contained.
    import functools

    # functools.wraps preserves the wrapped coroutine function's metadata
    # (__name__, __doc__, __module__, ...) so introspection, logging and
    # debugging see the real function instead of an anonymous "wrap".
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        coro = f(*args, **kwargs)
        # Off-loop callers get the resolved value; on-loop callers get the
        # coroutine to await themselves.
        if not iAmLoop():
            return sync(coro)
        return coro

    return wrap
| {
"content_hash": "d24a95ad13491e48ec2be30039506d7c",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 101,
"avg_line_length": 26.311475409836067,
"alnum_prop": 0.626791277258567,
"repo_name": "vertexproject/synapse",
"id": "faab5cafe0fff915dd55e9dd52ee470be4ca58b9",
"size": "3210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synapse/glob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4010"
},
{
"name": "HTML",
"bytes": "3"
},
{
"name": "Python",
"bytes": "5894053"
},
{
"name": "Shell",
"bytes": "10776"
}
],
"symlink_target": ""
} |
import h2o
import h2o_cmd
import h2o_hosts
import sys
import time
import webbrowser
#import unittest, time, sys
#sys.path.extend(['.','..','py'])
# Point h2o at the multi-host cloud configuration, then build the cloud.
h2o.config_json = "testdir_hosts/pytest_config-cypof.json"
h2o_hosts.build_cloud_with_hosts()
#h2o.build_cloud(4, java_heap_GB=1, capture_output=False, classpath=True)
# Run KMeans (k=7) on the covtype sample dataset.
file = csvPathname = h2o.find_file('smalldata/covtype/covtype.20k.data')
h2o_cmd.runKMeans(csvPathname=file, key='covtype', k=7)
# Open the progress page in a browser, give it a moment, then shut down.
# NOTE(review): assumes the node is listening on localhost:54323.
webbrowser.open("http://localhost:54323/Progress.html?destination_key=covtype.kmeans")
time.sleep(1)
h2o.tear_down_cloud()
| {
"content_hash": "823f9b28e385a5bc30801bc9781bdb84",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 86,
"avg_line_length": 28.5,
"alnum_prop": 0.7543859649122807,
"repo_name": "woobe/h2o",
"id": "aa040f08038da3c62c6d76168273344dc25b4a68",
"size": "570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/cypof.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from oslo_db import exception
from oslo_db.sqlalchemy import utils
from oslo_utils import uuidutils
import six
import sqlalchemy
from cloudkitty import db
from cloudkitty.rating.hash.db import api
from cloudkitty.rating.hash.db.sqlalchemy import migration
from cloudkitty.rating.hash.db.sqlalchemy import models
def get_backend():
    """Return the HashMap DB API implementation (backend entry point)."""
    return HashMap()
class HashMap(api.HashMap):
    """SQLAlchemy backend for the HashMap rating module's DB API.

    Provides get/list/create/update/delete operations over services,
    fields, groups, mappings and thresholds as defined in
    cloudkitty.rating.hash.db.sqlalchemy.models.
    """
    def get_migration(self):
        """Return the database migration module for this backend."""
        return migration
    def get_service(self, name=None, uuid=None):
        """Fetch a service by name or by uuid.

        :raises api.NoSuchService: when no matching row exists.
        :raises ValueError: when neither identifier is supplied.
        """
        session = db.get_session()
        try:
            q = session.query(models.HashMapService)
            if name:
                q = q.filter(
                    models.HashMapService.name == name)
            elif uuid:
                q = q.filter(
                    models.HashMapService.service_id == uuid)
            else:
                raise ValueError('You must specify either name or uuid.')
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchService(name=name, uuid=uuid)
    def get_field(self, uuid=None, service_uuid=None, name=None):
        """Fetch a field by uuid, or by (service_uuid, name).

        :raises api.NoSuchField: when no matching row exists.
        :raises ValueError: when an invalid identifier combination is given.
        """
        session = db.get_session()
        try:
            q = session.query(models.HashMapField)
            if uuid:
                q = q.filter(
                    models.HashMapField.field_id == uuid)
            elif service_uuid and name:
                q = q.join(
                    models.HashMapField.service)
                q = q.filter(
                    models.HashMapService.service_id == service_uuid,
                    models.HashMapField.name == name)
            else:
                raise ValueError('You must specify either an uuid'
                                 ' or a service_uuid and a name.')
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            # NOTE(review): when looked up via (service_uuid, name), uuid is
            # None here, so NoSuchField carries no useful identifier.
            raise api.NoSuchField(uuid)
    def get_group(self, uuid):
        """Fetch a group by uuid, raising api.NoSuchGroup if absent."""
        session = db.get_session()
        try:
            q = session.query(models.HashMapGroup)
            q = q.filter(
                models.HashMapGroup.group_id == uuid)
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchGroup(uuid=uuid)
    def get_mapping(self, uuid):
        """Fetch a mapping by uuid, raising api.NoSuchMapping if absent."""
        session = db.get_session()
        try:
            q = session.query(models.HashMapMapping)
            q = q.filter(
                models.HashMapMapping.mapping_id == uuid)
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchMapping(uuid)
    def get_threshold(self, uuid):
        """Fetch a threshold by uuid, raising api.NoSuchThreshold if absent."""
        session = db.get_session()
        try:
            q = session.query(models.HashMapThreshold)
            q = q.filter(
                models.HashMapThreshold.threshold_id == uuid)
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchThreshold(uuid)
    def get_group_from_mapping(self, uuid):
        """Return the group owning the given mapping.

        :raises api.MappingHasNoGroup: when the mapping has no group.
        """
        session = db.get_session()
        try:
            q = session.query(models.HashMapGroup)
            q = q.join(
                models.HashMapGroup.mappings)
            q = q.filter(
                models.HashMapMapping.mapping_id == uuid)
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.MappingHasNoGroup(uuid=uuid)
    def get_group_from_threshold(self, uuid):
        """Return the group owning the given threshold.

        :raises api.ThresholdHasNoGroup: when the threshold has no group.
        """
        session = db.get_session()
        try:
            q = session.query(models.HashMapGroup)
            q = q.join(
                models.HashMapGroup.thresholds)
            q = q.filter(
                models.HashMapThreshold.threshold_id == uuid)
            res = q.one()
            return res
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.ThresholdHasNoGroup(uuid=uuid)
    def list_services(self):
        """Return the list of all service uuids."""
        session = db.get_session()
        q = session.query(models.HashMapService)
        # Query.values() yields single-column tuples; unwrap them.
        res = q.values(
            models.HashMapService.service_id)
        return [uuid[0] for uuid in res]
    def list_fields(self, service_uuid):
        """Return the uuids of every field attached to a service."""
        session = db.get_session()
        q = session.query(models.HashMapField)
        q = q.join(
            models.HashMapField.service)
        q = q.filter(
            models.HashMapService.service_id == service_uuid)
        res = q.values(models.HashMapField.field_id)
        return [uuid[0] for uuid in res]
    def list_groups(self):
        """Return the list of all group uuids."""
        session = db.get_session()
        q = session.query(models.HashMapGroup)
        res = q.values(
            models.HashMapGroup.group_id)
        return [uuid[0] for uuid in res]
    def list_mappings(self,
                      service_uuid=None,
                      field_uuid=None,
                      group_uuid=None,
                      no_group=False):
        """List mapping uuids filtered by parent service/field and/or group.

        :raises ValueError: when no parent filter at all is supplied.
        """
        session = db.get_session()
        q = session.query(models.HashMapMapping)
        if service_uuid:
            q = q.join(
                models.HashMapMapping.service)
            q = q.filter(
                models.HashMapService.service_id == service_uuid)
        elif field_uuid:
            q = q.join(
                models.HashMapMapping.field)
            q = q.filter(models.HashMapField.field_id == field_uuid)
        if group_uuid:
            q = q.join(
                models.HashMapMapping.group)
            q = q.filter(models.HashMapGroup.group_id == group_uuid)
        elif not service_uuid and not field_uuid:
            raise ValueError('You must specify either service_uuid,'
                             ' field_uuid or group_uuid.')
        elif no_group:
            q = q.filter(models.HashMapMapping.group_id == None)  # noqa
        res = q.values(
            models.HashMapMapping.mapping_id)
        return [uuid[0] for uuid in res]
    def list_thresholds(self,
                        service_uuid=None,
                        field_uuid=None,
                        group_uuid=None,
                        no_group=False):
        """List threshold uuids filtered by parent service/field and/or group.

        :raises ValueError: when no parent filter at all is supplied.
        """
        session = db.get_session()
        q = session.query(models.HashMapThreshold)
        if service_uuid:
            q = q.join(
                models.HashMapThreshold.service)
            q = q.filter(
                models.HashMapService.service_id == service_uuid)
        elif field_uuid:
            q = q.join(
                models.HashMapThreshold.field)
            q = q.filter(models.HashMapField.field_id == field_uuid)
        if group_uuid:
            q = q.join(
                models.HashMapThreshold.group)
            q = q.filter(models.HashMapGroup.group_id == group_uuid)
        elif not service_uuid and not field_uuid:
            raise ValueError('You must specify either service_uuid,'
                             ' field_uuid or group_uuid.')
        elif no_group:
            q = q.filter(models.HashMapThreshold.group_id == None)  # noqa
        res = q.values(
            models.HashMapThreshold.threshold_id)
        return [uuid[0] for uuid in res]
    def create_service(self, name):
        """Create a service with a generated uuid.

        :raises api.ServiceAlreadyExists: on duplicate name.
        """
        session = db.get_session()
        try:
            with session.begin():
                service_db = models.HashMapService(name=name)
                service_db.service_id = uuidutils.generate_uuid()
                session.add(service_db)
            return service_db
        except exception.DBDuplicateEntry:
            service_db = self.get_service(name=name)
            raise api.ServiceAlreadyExists(
                service_db.name,
                service_db.service_id)
    def create_field(self, service_uuid, name):
        """Create a field under a service.

        :raises api.FieldAlreadyExists: on duplicate (service, name).
        """
        service_db = self.get_service(uuid=service_uuid)
        session = db.get_session()
        try:
            with session.begin():
                field_db = models.HashMapField(
                    service_id=service_db.id,
                    name=name,
                    field_id=uuidutils.generate_uuid())
                session.add(field_db)
            # FIXME(sheeprine): backref are not populated as they used to be.
            # Querying the item again to get backref.
            field_db = self.get_field(service_uuid=service_uuid,
                                      name=name)
        except exception.DBDuplicateEntry:
            raise api.FieldAlreadyExists(field_db.name, field_db.field_id)
        else:
            return field_db
    def create_group(self, name):
        """Create a group with a generated uuid.

        :raises api.GroupAlreadyExists: on duplicate name.
        """
        session = db.get_session()
        try:
            with session.begin():
                group_db = models.HashMapGroup(
                    name=name,
                    group_id=uuidutils.generate_uuid())
                session.add(group_db)
            return group_db
        except exception.DBDuplicateEntry:
            raise api.GroupAlreadyExists(name, group_db.group_id)
    def create_mapping(self,
                       cost,
                       map_type='rate',
                       value=None,
                       service_id=None,
                       field_id=None,
                       group_id=None):
        """Create a mapping attached to a service OR a field.

        Service mappings take no value; field mappings require one.

        :raises ValueError: on invalid parent/value combinations.
        :raises api.MappingAlreadyExists: on duplicate mapping.
        :raises api.NoSuchType: when map_type is rejected by the DB.
        """
        if field_id and service_id:
            raise ValueError('You can only specify one parent.')
        if not value and not service_id:
            raise ValueError('You must either specify a value'
                             ' or a service_id')
        elif value and service_id:
            raise ValueError('You can\'t specify a value'
                             ' and a service_id.')
        field_fk = None
        if field_id:
            field_db = self.get_field(uuid=field_id)
            field_fk = field_db.id
        service_fk = None
        if service_id:
            service_db = self.get_service(uuid=service_id)
            service_fk = service_db.id
        group_fk = None
        if group_id:
            group_db = self.get_group(uuid=group_id)
            group_fk = group_db.id
        session = db.get_session()
        try:
            with session.begin():
                field_map = models.HashMapMapping(
                    mapping_id=uuidutils.generate_uuid(),
                    value=value,
                    cost=cost,
                    field_id=field_fk,
                    service_id=service_fk,
                    map_type=map_type)
                if group_fk:
                    field_map.group_id = group_fk
                session.add(field_map)
        except exception.DBDuplicateEntry:
            raise api.MappingAlreadyExists(value, field_map.field_id)
        except exception.DBError:
            raise api.NoSuchType(map_type)
        # FIXME(sheeprine): backref are not populated as they used to be.
        # Querying the item again to get backref.
        field_map = self.get_mapping(field_map.mapping_id)
        return field_map
    def create_threshold(self,
                         level,
                         cost,
                         map_type='rate',
                         service_id=None,
                         field_id=None,
                         group_id=None):
        """Create a threshold attached to a service OR a field.

        :raises ValueError: when both parents are supplied.
        :raises api.ThresholdAlreadyExists: on duplicate threshold.
        :raises api.NoSuchType: when map_type is rejected by the DB.
        """
        if field_id and service_id:
            raise ValueError('You can only specify one parent.')
        field_fk = None
        if field_id:
            field_db = self.get_field(uuid=field_id)
            field_fk = field_db.id
        service_fk = None
        if service_id:
            service_db = self.get_service(uuid=service_id)
            service_fk = service_db.id
        group_fk = None
        if group_id:
            group_db = self.get_group(uuid=group_id)
            group_fk = group_db.id
        session = db.get_session()
        try:
            with session.begin():
                threshold_db = models.HashMapThreshold(
                    threshold_id=uuidutils.generate_uuid(),
                    level=level,
                    cost=cost,
                    field_id=field_fk,
                    service_id=service_fk,
                    map_type=map_type)
                if group_fk:
                    threshold_db.group_id = group_fk
                session.add(threshold_db)
        except exception.DBDuplicateEntry:
            raise api.ThresholdAlreadyExists(level, threshold_db.field_id)
        except exception.DBError:
            raise api.NoSuchType(map_type)
        # FIXME(sheeprine): backref are not populated as they used to be.
        # Querying the item again to get backref.
        threshold_db = self.get_threshold(threshold_db.threshold_id)
        return threshold_db
    def update_mapping(self, uuid, **kwargs):
        """Update attributes of a mapping under a row lock.

        group_id is resolved to its FK; mapping_id/service_id/field_id are
        silently ignored.

        :raises api.NoSuchMapping: when the mapping does not exist.
        :raises ValueError: on unknown attributes or an empty update.
        """
        session = db.get_session()
        try:
            with session.begin():
                q = session.query(models.HashMapMapping)
                q = q.filter(
                    models.HashMapMapping.mapping_id == uuid
                )
                # NOTE(review): with_lockmode() is the legacy SQLAlchemy
                # row-lock API (superseded by with_for_update()).
                mapping_db = q.with_lockmode('update').one()
                if kwargs:
                    # Resolve FK
                    if 'group_id' in kwargs:
                        group_id = kwargs.pop('group_id')
                        if group_id:
                            group_db = self.get_group(group_id)
                            mapping_db.group_id = group_db.id
                    # Service and Field shouldn't be updated
                    excluded_cols = ['mapping_id', 'service_id', 'field_id']
                    for col in excluded_cols:
                        if col in kwargs:
                            kwargs.pop(col)
                    for attribute, value in six.iteritems(kwargs):
                        if hasattr(mapping_db, attribute):
                            setattr(mapping_db, attribute, value)
                        else:
                            raise ValueError('No such attribute: {}'.format(
                                attribute))
                else:
                    raise ValueError('No attribute to update.')
                return mapping_db
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchMapping(uuid)
    def update_threshold(self, uuid, **kwargs):
        """Update attributes of a threshold under a row lock.

        Mirrors update_mapping for threshold rows.

        :raises api.NoSuchThreshold: when the threshold does not exist.
        :raises ValueError: on unknown attributes or an empty update.
        """
        session = db.get_session()
        try:
            with session.begin():
                q = session.query(models.HashMapThreshold)
                q = q.filter(
                    models.HashMapThreshold.threshold_id == uuid)
                threshold_db = q.with_lockmode('update').one()
                if kwargs:
                    # Resolve FK
                    if 'group_id' in kwargs:
                        group_id = kwargs.pop('group_id')
                        if group_id:
                            group_db = self.get_group(group_id)
                            threshold_db.group_id = group_db.id
                    # Service and Field shouldn't be updated
                    excluded_cols = ['threshold_id', 'service_id', 'field_id']
                    for col in excluded_cols:
                        if col in kwargs:
                            kwargs.pop(col)
                    for attribute, value in six.iteritems(kwargs):
                        if hasattr(threshold_db, attribute):
                            setattr(threshold_db, attribute, value)
                        else:
                            raise ValueError('No such attribute: {}'.format(
                                attribute))
                else:
                    raise ValueError('No attribute to update.')
                return threshold_db
        except sqlalchemy.orm.exc.NoResultFound:
            raise api.NoSuchThreshold(uuid)
    def delete_service(self, name=None, uuid=None):
        """Delete a service by name or uuid.

        :raises api.NoSuchService: when nothing was deleted.
        :raises ValueError: when neither identifier is supplied.
        """
        session = db.get_session()
        q = utils.model_query(
            models.HashMapService,
            session)
        if name:
            q = q.filter(models.HashMapService.name == name)
        elif uuid:
            q = q.filter(models.HashMapService.service_id == uuid)
        else:
            raise ValueError('You must specify either name or uuid.')
        r = q.delete()
        if not r:
            raise api.NoSuchService(name, uuid)
    def delete_field(self, uuid):
        """Delete a field by uuid, raising api.NoSuchField if absent."""
        session = db.get_session()
        q = utils.model_query(
            models.HashMapField,
            session)
        q = q.filter(models.HashMapField.field_id == uuid)
        r = q.delete()
        if not r:
            raise api.NoSuchField(uuid)
    def delete_group(self, uuid, recurse=True):
        """Delete a group by uuid, optionally cascading to its children.

        :param recurse: when True, also delete the group's mappings and
                        thresholds.
        :raises api.NoSuchGroup: when the group does not exist.
        """
        session = db.get_session()
        q = utils.model_query(
            models.HashMapGroup,
            session)
        q = q.filter(models.HashMapGroup.group_id == uuid)
        with session.begin():
            try:
                r = q.with_lockmode('update').one()
            except sqlalchemy.orm.exc.NoResultFound:
                raise api.NoSuchGroup(uuid=uuid)
            if recurse:
                for mapping in r.mappings:
                    session.delete(mapping)
                for threshold in r.thresholds:
                    session.delete(threshold)
            q.delete()
    def delete_mapping(self, uuid):
        """Delete a mapping by uuid, raising api.NoSuchMapping if absent."""
        session = db.get_session()
        q = utils.model_query(
            models.HashMapMapping,
            session)
        q = q.filter(models.HashMapMapping.mapping_id == uuid)
        r = q.delete()
        if not r:
            raise api.NoSuchMapping(uuid)
    def delete_threshold(self, uuid):
        """Delete a threshold by uuid, raising api.NoSuchThreshold if absent."""
        session = db.get_session()
        q = utils.model_query(
            models.HashMapThreshold,
            session)
        q = q.filter(models.HashMapThreshold.threshold_id == uuid)
        r = q.delete()
        if not r:
            raise api.NoSuchThreshold(uuid)
| {
"content_hash": "c1a1916727ea3a99d6b62d8045a15a9c",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 78,
"avg_line_length": 37.81974248927039,
"alnum_prop": 0.5150930549251022,
"repo_name": "muraliselva10/cloudkitty",
"id": "b5ad73a39930592a65da5e47e1952820d515f26c",
"size": "18286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkitty/rating/hash/db/sqlalchemy/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2060"
},
{
"name": "Python",
"bytes": "526205"
},
{
"name": "Shell",
"bytes": "12562"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from openslides.saml import SAML_ENABLED
from openslides.utils.plugins import get_all_plugin_urlpatterns
# Start with every url pattern contributed by installed plugins, then mount
# the core apps under their prefixes.
urlpatterns = get_all_plugin_urlpatterns()
urlpatterns += [
    url(r"^core/", include("openslides.core.urls")),
    url(r"^users/", include("openslides.users.urls")),
]
# SAML urls are only mounted when SAML support is enabled in settings.
if SAML_ENABLED:
    urlpatterns += [url(r"^saml/", include("openslides.saml.urls"))]
| {
"content_hash": "1f559e5332778131da0a30e729e31bb8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 27.2,
"alnum_prop": 0.7205882352941176,
"repo_name": "FinnStutzenstein/OpenSlides",
"id": "e4dc76c5f23e3f17bdc1760dbb346ff20530b728",
"size": "408",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/openslides/urls_apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124087"
},
{
"name": "Dockerfile",
"bytes": "853"
},
{
"name": "HTML",
"bytes": "449533"
},
{
"name": "JavaScript",
"bytes": "159617"
},
{
"name": "Python",
"bytes": "1398362"
},
{
"name": "Smarty",
"bytes": "7293"
},
{
"name": "TypeScript",
"bytes": "2473991"
}
],
"symlink_target": ""
} |
'''
Created on 9 August 2014

@author: ludovicl
'''
import sys
# Dispatch on the requested packaging backend.
# NOTE(review): assumes a sub-command argument is always provided
# (IndexError otherwise).
# py2app (Mac OS X)
if sys.argv[1] == 'py2app' :
    # Module : py2app
    # usage : python standalone_setup.py py2app
    # To reduce size on Mac OS X : ditto --rsrc --arch x86_64 oldapp.app newapp.app
    from setuptools import setup
    ####Remove plist executable######
    # First build the small preferences-removal helper app.
    setup(
        app=['RemovePreferences.py'],
        setup_requires=['py2app'],
        options={'py2app': dict(strip=True, optimize=2,)},
    )
    # Then build the main application bundle.
    APP = ['Entry.py']
    REMOVE_PREF = ['RemovePreferences.py']
    DATA_FILES = [("images", ["Resources/images/texticon64.png", "Resources/images/imgicon64.png", "Resources/images/DragHere.png"])]
    OPTIONS = dict(
        site_packages=True,
        arch='x86_64',
        strip=True,
        optimize=2,
        excludes='',
        iconfile='Resources/images/icon.icns',
        plist={
            'CFBundleName': 'Drag&Press',
            'CFBundleShortVersionString':'0.1.0',
            'CFBundleVersion': '0.1.0',
        }
    )
    setup(
        app=APP,
        data_files=DATA_FILES,
        options={'py2app': OPTIONS},
        setup_requires=['py2app'],
    )
# cx_freeze setup (GNU/Linux)
if sys.argv[1] == "install" :
    print "Before install"
    # Module : cx_freeze
    # usage : python standalone_setup.py install
    from cx_Freeze import setup, Executable
    ####Remove plist executable######
    # Standalone preferences-removal helper.
    setup(
        name="RemovePref",
        executables=[Executable(
            script="RemovePreferences.py",
            initScript=None,
            targetName="RemovePreferences",
            appendScriptToLibrary=False,
        )],
    )
    # Main application: bundle image resources and trim unused modules.
    options = {
        'build_exe': {
            'compressed': True,
            'include_files':["Resources/images/texticon64.png", "Resources/images/imgicon64.png", "Resources/images/DragHere.png", "Resources/images/icon.png"],
            'excludes':['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
                        'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
                        'Tkconstants', 'Tkinter'],
        }
    }
    app = Executable(
        script="Entry.py",
        initScript=None,
        targetName="Drag&Press",
        compress=True,
        base=None,
        copyDependentFiles=True,
        appendScriptToExe=False,
        appendScriptToLibrary=False,
        icon="Resources/images/icon.png",
    )
    setup(
        name="Drag&Press",
        author='ludovicl',
        # icon = "Resources/images/icon.png",
        version="0.1",
        description="Publish on a Wordpress blog by drag and droging",
        options=options,
        executables=[app],
    )
| {
"content_hash": "a40d9a7c00ff4700d91318753b960952",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 160,
"avg_line_length": 26.394495412844037,
"alnum_prop": 0.5290232881473758,
"repo_name": "ludovicl/Drag-Press",
"id": "da17afecc5059571b6003a6a0f7a4ae2e9dc9a94",
"size": "2901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "standalone_setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19339"
}
],
"symlink_target": ""
} |
import rospy
import tf
from geometry_msgs.msg import PoseStamped, Quaternion, TwistStamped
from dbw_mkz_msgs.msg import SteeringReport, ThrottleCmd, BrakeCmd, SteeringCmd
from std_msgs.msg import Float32 as Float
from std_msgs.msg import Bool
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Image
import sensor_msgs.point_cloud2 as pcl2
from std_msgs.msg import Header
from cv_bridge import CvBridge, CvBridgeError
from styx_msgs.msg import TrafficLight, TrafficLightArray, Lane
import numpy as np
from PIL import Image as PIL_Image
from io import BytesIO
import base64
import math
# Maps the short type names used in the styx config to the ROS message
# classes used when creating publishers and subscribers.
TYPE = {
    'bool': Bool,
    'float': Float,
    'pose': PoseStamped,
    'pcl': PointCloud2,
    'twist': TwistStamped,
    'steer': SteeringReport,
    'trafficlights': TrafficLightArray,
    'steer_cmd': SteeringCmd,
    'brake_cmd': BrakeCmd,
    'throttle_cmd': ThrottleCmd,
    'path_draw': Lane,
    'image':Image
}
class Bridge(object):
def __init__(self, conf, server):
rospy.init_node('styx_server')
self.server = server
self.vel = 0.
self.yaw = None
self.angular_vel = 0.
self.bridge = CvBridge()
self.callbacks = {
'/vehicle/steering_cmd': self.callback_steering,
'/vehicle/throttle_cmd': self.callback_throttle,
'/vehicle/brake_cmd': self.callback_brake,
'/final_waypoints': self.callback_path
}
self.subscribers = [rospy.Subscriber(e.topic, TYPE[e.type], self.callbacks[e.topic])
for e in conf.subscribers]
self.publishers = {e.name: rospy.Publisher(e.topic, TYPE[e.type], queue_size=1)
for e in conf.publishers}
def create_light(self, x, y, z, yaw, state):
light = TrafficLight()
light.header = Header()
light.header.stamp = rospy.Time.now()
light.header.frame_id = '/world'
light.pose = self.create_pose(x, y, z, yaw)
light.state = state
return light
def create_pose(self, x, y, z, yaw=0.):
pose = PoseStamped()
pose.header = Header()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = '/world'
pose.pose.position.x = x
pose.pose.position.y = y
pose.pose.position.z = z
q = tf.transformations.quaternion_from_euler(0., 0., math.pi * yaw/180.)
pose.pose.orientation = Quaternion(*q)
return pose
def create_float(self, val):
fl = Float()
fl.data = val
return fl
def create_twist(self, velocity, angular):
tw = TwistStamped()
tw.twist.linear.x = velocity
tw.twist.angular.z = angular
return tw
def create_steer(self, val):
st = SteeringReport()
st.steering_wheel_angle_cmd = val * math.pi/180.
st.enabled = True
st.speed = self.vel
return st
def calc_angular(self, yaw):
angular_vel = 0.
if self.yaw is not None:
angular_vel = (yaw - self.yaw)/(rospy.get_time() - self.prev_time)
self.yaw = yaw
self.prev_time = rospy.get_time()
return angular_vel
def create_point_cloud_message(self, pts):
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = '/world'
cloud_message = pcl2.create_cloud_xyz32(header, pts)
return cloud_message
def broadcast_transform(self, name, position, orientation):
br = tf.TransformBroadcaster()
br.sendTransform(position,
orientation,
rospy.Time.now(),
name,
"world")
def publish_odometry(self, data):
pose = self.create_pose(data['x'], data['y'], data['z'], data['yaw'])
position = (data['x'], data['y'], data['z'])
orientation = tf.transformations.quaternion_from_euler(0, 0, math.pi * data['yaw']/180.)
self.broadcast_transform("base_link", position, orientation)
self.publishers['current_pose'].publish(pose)
self.vel = data['velocity']* 0.44704
self.angular = self.calc_angular(data['yaw'] * math.pi/180.)
self.publishers['current_velocity'].publish(self.create_twist(self.vel, self.angular))
def publish_controls(self, data):
steering, throttle, brake = data['steering_angle'], data['throttle'], data['brake']
self.publishers['steering_report'].publish(self.create_steer(steering))
self.publishers['throttle_report'].publish(self.create_float(throttle))
self.publishers['brake_report'].publish(self.create_float(brake))
def publish_obstacles(self, data):
for obs in data['obstacles']:
pose = self.create_pose(obs[0], obs[1], obs[2])
self.publishers['obstacle'].publish(pose)
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = '/world'
cloud = pcl2.create_cloud_xyz32(header, data['obstacles'])
self.publishers['obstacle_points'].publish(cloud)
def publish_lidar(self, data):
self.publishers['lidar'].publish(self.create_point_cloud_message(zip(data['lidar_x'], data['lidar_y'], data['lidar_z'])))
def publish_traffic(self, data):
x, y, z = data['light_pos_x'], data['light_pos_y'], data['light_pos_z'],
yaw = [math.atan2(dy, dx) for dx, dy in zip(data['light_pos_dx'], data['light_pos_dy'])]
status = data['light_state']
lights = TrafficLightArray()
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = '/world'
lights.lights = [self.create_light(*e) for e in zip(x, y, z, yaw, status)]
self.publishers['trafficlights'].publish(lights)
def publish_dbw_status(self, data):
self.publishers['dbw_status'].publish(Bool(data))
    def publish_camera(self, data):
        """Decode the simulator's base64-encoded camera frame and republish it as a ROS Image."""
        imgString = data["image"]
        image = PIL_Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        # assumes the decoded frame is RGB ("rgb8") — TODO confirm the simulator's pixel format
        image_message = self.bridge.cv2_to_imgmsg(image_array, encoding="rgb8")
        self.publishers['image'].publish(image_message)
    def callback_steering(self, data):
        """Forward a steering command from ROS to the simulator (value sent as a string)."""
        self.server('steer', data={'steering_angle': str(data.steering_wheel_angle_cmd)})
    def callback_throttle(self, data):
        """Forward a throttle command from ROS to the simulator (value sent as a string)."""
        self.server('throttle', data={'throttle': str(data.pedal_cmd)})
    def callback_brake(self, data):
        """Forward a brake command from ROS to the simulator (value sent as a string)."""
        self.server('brake', data={'brake': str(data.pedal_cmd)})
def callback_path(self, data):
x_values = []
y_values = []
z_values = []
for waypoint in data.waypoints:
x = waypoint.pose.pose.position.x
y = waypoint.pose.pose.position.y
z = waypoint.pose.pose.position.z+0.5
x_values.append(x)
y_values.append(y)
z_values.append(z)
self.server('drawline', data={'next_x': x_values, 'next_y': y_values, 'next_z': z_values})
| {
"content_hash": "fbbda466ad6eae7194c7e329a5769c02",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 129,
"avg_line_length": 34.048780487804876,
"alnum_prop": 0.610458452722063,
"repo_name": "DavidObando/carnd",
"id": "400ce704de4ed74b63769306de5566bea5883100",
"size": "6981",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Term3/Project3/ros/src/styx/bridge.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1026"
},
{
"name": "C",
"bytes": "397957"
},
{
"name": "C++",
"bytes": "19427858"
},
{
"name": "CMake",
"bytes": "287149"
},
{
"name": "CSS",
"bytes": "5383"
},
{
"name": "Cuda",
"bytes": "131738"
},
{
"name": "Dockerfile",
"bytes": "2543"
},
{
"name": "Fortran",
"bytes": "1326303"
},
{
"name": "HTML",
"bytes": "5743866"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Jupyter Notebook",
"bytes": "28650718"
},
{
"name": "Makefile",
"bytes": "3707"
},
{
"name": "Python",
"bytes": "327683"
},
{
"name": "Shell",
"bytes": "25869"
}
],
"symlink_target": ""
} |
'''
Run this script inside of src/ and it will look for all the files
that were changed this year that still have the last year in the
copyright headers, and it will fix the headers on that file using
a perl regex one liner.
For example: if it finds something like this and we're in 2014
// Copyright (c) 2009-2013 The Bitcoin developers
it will change it to
// Copyright (c) 2009-2014 The Bitcoin developers
It will do this for all the files in the folder and its children.
Author: @gubatron
'''
import os
import time
# Current year (UTC) and the year before it: files last committed this year
# whose headers still end at last_year get rewritten in place.
year = time.gmtime()[0]
last_year = year - 1
# perl in-place substitution template; formatted with (last_year, year, file_path)
command = "perl -pi -e 's/%s The Greencoin/%s The Greencoin/' %s"
# shell pipeline template used to enumerate candidate files by extension
listFilesCommand = "find . | grep %s"
# only C++ sources and headers are processed
extensions = [".cpp",".h"]
def getLastGitModifiedDate(filePath):
    """Return the 'Date:' header line of the newest git commit touching
    filePath, with its trailing newline removed ('' when there is none)."""
    pipe = os.popen("git log " + filePath + " | grep Date | head -n 1")
    first_line = next(iter(pipe), "")
    return first_line.replace("\n", "")
n=1
for extension in extensions:
    # 'find . | grep <ext>' lists candidate paths, one per line, e.g. "./src/x.cpp\n"
    foundFiles = os.popen(listFilesCommand % extension)
    for filePath in foundFiles:
        # strip the leading "." of the find output and the trailing newline
        filePath = filePath[1:-1]
        if filePath.endswith(extension):
            filePath = os.getcwd() + filePath
            modifiedTime = getLastGitModifiedDate(filePath)
            # only rewrite files whose last commit happened in the current year
            if len(modifiedTime) > 0 and str(year) in modifiedTime:
                print n,"Last Git Modified: ", modifiedTime, " - ", filePath
                os.popen(command % (last_year,year,filePath))
                n = n + 1
| {
"content_hash": "211b0fb6871cc00b31dc617705fa3edb",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 81,
"avg_line_length": 28,
"alnum_prop": 0.6929945054945055,
"repo_name": "CavityGap/greencoin",
"id": "65da7d9465503cc9053955ccb7590c505e47755d",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/devtools/fix-copyright-headers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "318383"
},
{
"name": "C++",
"bytes": "3548413"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2102"
},
{
"name": "M4",
"bytes": "138611"
},
{
"name": "Makefile",
"bytes": "81748"
},
{
"name": "Objective-C",
"bytes": "4381"
},
{
"name": "Objective-C++",
"bytes": "7186"
},
{
"name": "Protocol Buffer",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "207943"
},
{
"name": "QMake",
"bytes": "2021"
},
{
"name": "Roff",
"bytes": "18163"
},
{
"name": "Shell",
"bytes": "45054"
}
],
"symlink_target": ""
} |
import numpy
from chainer import functions
from chainer import testing
def _decorrelated_batch_normalization(x, mean, projection, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
assert projection.shape[0] == groups
ys = [
_decorrelated_batch_normalization_1group(xi, m, p)
for (xi, m, p) in zip(xs, mean, projection)]
return numpy.concatenate(ys, axis=1)
def _decorrelated_batch_normalization_1group(x, mean, projection):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
y_hat = projection.dot(x_hat - mean[:, None])
y = y_hat.reshape((C, b) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
return y
def _calc_projection(x, mean, eps, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
projections = [
_calc_projection_1group(xi, m, eps)
for (xi, m) in zip(xs, mean)]
return numpy.concatenate([p[None] for p in projections])
def _calc_projection_1group(x, mean, eps):
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
m = b
for i in spatial_axis:
m *= x.shape[i]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
mean = x_hat.mean(axis=1)
x_hat = x_hat - mean[:, None]
cov = x_hat.dot(x_hat.T) / m + eps * numpy.eye(C, dtype=x.dtype)
eigvals, eigvectors = numpy.linalg.eigh(cov)
projection = eigvectors.dot(numpy.diag(eigvals ** -0.5)).dot(eigvectors.T)
return projection
def _calc_mean(x, groups):
axis = (0,) + tuple(range(2, x.ndim))
return x.mean(axis=axis).reshape(groups, -1)
@testing.parameterize(*(testing.product({
    'n_channels': [8],
    'ndim': [0, 2],
    'groups': [1, 2],
    'eps': [2e-5, 5e-1],
    'dtype': [numpy.float32],
    'contiguous': ['C', None],
}) + testing.product({
    'n_channels': [8],
    'ndim': [1],
    'groups': [1, 2],
    'eps': [2e-5, 5e-1],
    # NOTE(crcrpar): np.linalg.eigh does not support float16
    'dtype': [numpy.float32, numpy.float64],
    'contiguous': ['C', None],
})))
@testing.backend.inject_backend_tests(
    None,
    # CPU tests
    [{}]
    # GPU tests
    + [{'use_cuda': True}]
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0']
    })
)
class TestDecorrelatedBatchNormalization(testing.FunctionTestCase):
    """Forward/backward tests of decorrelated_batch_normalization against the
    NumPy reference implementations defined above."""
    # TODO(crcrpar): Delete this line once double backward of
    # :func:`~chainer.functions.decorrelated_batch_normalization` is
    # implemented.
    skip_double_backward_test = True
    def setUp(self):
        # Default tolerances; loosened below for lower-precision dtypes.
        check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
        check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
            check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
        elif self.dtype == numpy.float32:
            check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
        self.check_forward_options = check_forward_options
        self.check_backward_options = check_backward_options
    def generate_inputs(self):
        # Construct inputs whose correlation-matrix eigenvalues are well
        # conditioned, so eigvals ** -0.5 is numerically stable.
        dtype = self.dtype
        ndim = self.ndim
        shape = (5, self.n_channels) + (2,) * ndim
        m = 5 * 2 ** ndim
        # NOTE(kataoka): The current implementation uses linalg.eigh. Small
        # eigenvalues of the correlation matrix, which can be as small as
        # eps=2e-5, cannot be computed with good *relative* accuracy, but
        # the eigenvalues are used later as `eigvals ** -0.5`. Require the
        # following is sufficiently large:
        # min(eigvals[:k]) == min(singular_vals ** 2 / m + eps)
        min_singular_value = 0.1
        # NOTE(kataoka): Decorrelated batch normalization should be free from
        # "stochastic axis swapping". Requiring a gap between singular values
        # just hides mistakes in implementations.
        min_singular_value_gap = 0.001
        g = self.groups
        zca_shape = g, self.n_channels // g, m
        x = numpy.random.uniform(-1, 1, zca_shape)
        mean = x.mean(axis=2, keepdims=True)
        a = x - mean
        u, s, vh = numpy.linalg.svd(a, full_matrices=False)
        # Decrement the latter dim because of the constraint `sum(_) == 0`
        k = min(zca_shape[1], zca_shape[2] - 1)
        s[:, :k] += (
            min_singular_value
            + min_singular_value_gap * numpy.arange(k)
        )[::-1]
        a = numpy.einsum('bij,bj,bjk->bik', u, s, vh)
        x = a + mean
        x = x.reshape((self.n_channels, shape[0]) + shape[2:]).swapaxes(0, 1)
        x = x.astype(dtype)
        return x,
    def forward(self, inputs, device):
        x, = inputs
        return functions.decorrelated_batch_normalization(
            x, groups=self.groups, eps=self.eps),
    def forward_expected(self, inputs):
        # Expected output computed with the NumPy reference helpers above.
        x, = inputs
        groups = self.groups
        mean = _calc_mean(x, groups)
        projection = _calc_projection(x, mean, self.eps, groups)
        return _decorrelated_batch_normalization(
            x, mean, projection, groups),
@testing.parameterize(*(testing.product({
    'n_channels': [8],
    'ndim': [0, 1, 2],
    'groups': [1, 2],
    'eps': [2e-5, 5e-1],
    'dtype': [numpy.float32],
    'contiguous': ['C', None],
}) + testing.product({
    'n_channels': [8],
    'ndim': [1],
    'groups': [1, 2],
    'eps': [2e-5, 5e-1],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'contiguous': ['C', None],
})))
@testing.backend.inject_backend_tests(
    None,
    # CPU tests
    [{}]
    # GPU tests
    + [{'use_cuda': True}]
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0'],
    })
)
class TestFixedDecorrelatedBatchNormalization(testing.FunctionTestCase):
    """Tests of fixed_decorrelated_batch_normalization, which applies
    externally supplied mean/projection statistics instead of computing them."""
    # TODO(crcrpar): Delete this line once double backward of
    # :func:`~chainer.functions.fixed_decorrelated_batch_normalization` is
    # implemented.
    skip_double_backward_test = True
    def setUp(self):
        C = self.n_channels // self.groups
        dtype = self.dtype
        # Fixed statistics fed to the function under test.
        self.mean = numpy.random.uniform(
            -1, 1, (self.groups, C)).astype(dtype)
        self.projection = numpy.random.uniform(
            0.5, 1, (self.groups, C, C)).astype(dtype)
        # Default tolerances; loosened below for lower-precision dtypes.
        check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
            check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
        if self.dtype == numpy.float32:
            check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
        self.check_forward_options = check_forward_options
        self.check_backward_options = check_backward_options
    def generate_inputs(self):
        dtype = self.dtype
        ndim = self.ndim
        shape = (5, self.n_channels) + (2,) * ndim
        x = numpy.random.uniform(-1, 1, shape).astype(dtype)
        return x,
    def forward(self, inputs, device):
        x, = inputs
        mean = device.send_array(self.mean.copy())
        projection = device.send_array(self.projection.copy())
        return functions.fixed_decorrelated_batch_normalization(
            x, mean, projection, groups=self.groups
        ),
    def forward_expected(self, inputs):
        # Expected output computed with the NumPy reference helper above.
        x, = inputs
        mean = self.mean.copy()
        projection = self.projection.copy()
        return _decorrelated_batch_normalization(
            x, mean, projection, self.groups),
# Standard chainer test entry point: run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| {
"content_hash": "80ac56f9b613e736bd375c87ae19336c",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 78,
"avg_line_length": 34.030701754385966,
"alnum_prop": 0.5882201314602398,
"repo_name": "wkentaro/chainer",
"id": "abc0537ee2215f319680f8405578f1e0ab49b459",
"size": "7759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/normalization_tests/test_decorrelated_batch_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "1662966"
},
{
"name": "CMake",
"bytes": "50912"
},
{
"name": "Cuda",
"bytes": "178765"
},
{
"name": "Dockerfile",
"bytes": "3316"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6041757"
},
{
"name": "Shell",
"bytes": "41813"
}
],
"symlink_target": ""
} |
"""
nestly is a collection of functions designed to make running software with
combinatorial choices of parameters easier.
"""
# Public version of the nestly package.
__version__ = '0.6.1'
from .core import Nest, nest_map, stripext
| {
"content_hash": "e030da0da5214072324b6962f7d39386",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 74,
"avg_line_length": 24.25,
"alnum_prop": 0.7422680412371134,
"repo_name": "fhcrc/nestly",
"id": "6d6adf2c3aa3803fe2462cc2dc6d1315af9fc851",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nestly/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "112"
},
{
"name": "Python",
"bytes": "63209"
}
],
"symlink_target": ""
} |
"""Add timeout to function calls."""
import signal
class CallTimeout(Exception):
    """Raised when a call wrapped by wrap_timeout() exceeds its time limit."""
    pass
def handler(signum, frame):
    """SIGALRM handler: raise CallTimeout naming the interrupted function."""
    raise CallTimeout("{} timed out".format(frame.f_code.co_name))
def wrap_timeout(func, timeout, args=None, kwargs=None):
    """Call ``func(*args, **kwargs)``, raising CallTimeout if it runs longer
    than ``timeout`` seconds.

    :param func: callable to invoke
    :param timeout: whole seconds before SIGALRM fires (int, as required by
        signal.alarm)
    :param args: positional arguments for ``func`` (defaults to [])
    :param kwargs: keyword arguments for ``func`` (defaults to {})
    :return: whatever ``func`` returns
    :raises CallTimeout: when the timeout elapses before ``func`` finishes

    Note: relies on SIGALRM, so it only works on Unix and in the main thread.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout)
    try:
        # handler() raises CallTimeout when the alarm fires mid-call
        return func(*args, **kwargs)
    finally:
        # Fix: always cancel the pending alarm — previously, if func raised,
        # the alarm stayed armed and could kill the process later.
        signal.alarm(0)
"content_hash": "1e825eb422ce0298d802ba6c4f09449a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 56,
"avg_line_length": 18.1,
"alnum_prop": 0.6629834254143646,
"repo_name": "CristianCantoro/wikidump",
"id": "b54eb1d97f5cb4ffb12e05d39a7b7ce945e97275",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikidump/timeout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "112162"
},
{
"name": "Shell",
"bytes": "2445"
}
],
"symlink_target": ""
} |
from mlpython.learners.generic import Learner
import numpy as np
import theano as T
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
class TemporalNeuralNetwork(Learner):
    """
    Siamese convolutional network trained with a temporal-coherence objective
    (Theano). Two weight-sharing conv towers (nnet1/nnet2) consume 72x72
    single-channel inputs; training alternates three phases:
      FIRST_PHASE  - supervised NLL on labeled minibatches,
      SECOND_PHASE - pull representations of consecutive frames together,
      THIRD_PHASE  - push representations of non-consecutive frames apart
                     (hinge with margin ``deltaDistance``).
    NOTE(review): this module uses Python 2 syntax (print statements and
    integer division) — do not run under Python 3 without porting.
    """
    def __init__(self,
                 lrFirstPhase = 0.001,
                 lrSecondPhase=0.001,
                 dc=0,
                 sizes=(50,100,150,200),
                 seed=1234,
                 parameter_initialization=None,
                 deltaDistance = 1,
                 n_epochs=10):
        # lrFirstPhase/lrSecondPhase: learning rates for the supervised and
        # temporal-coherence phases; dc: learning-rate decay constant;
        # sizes: number of feature maps per conv layer.
        self.lrFirstPhase=lrFirstPhase
        self.lrSecondPhase = lrSecondPhase
        self.dc=dc
        self.sizes=sizes
        self.seed=seed
        self.parameter_initialization = parameter_initialization
        self.n_epochs=n_epochs
        self.deltaDistance = deltaDistance
        # internal variable keeping track of the number of training iterations since initialization
        self.epoch = 0
        # shared Theano parameters, appended as layers are built
        self.params = []
        # compiled training functions, one per phase (filled in initialize())
        self.train_batch = [0,0,0]
        self.FIRST_PHASE = 0
        self.SECOND_PHASE = 1
        self.THIRD_PHASE = 2
    def initialize(self, input_size, n_classes, batchsize):
        """
        This method allocates memory for the fprop/bprop computations (DONE)
        and initializes the parameters of the neural network (TODO)
        """
        self.n_classes = n_classes
        self.input_size = input_size
        #########################
        # Initialize parameters #
        #########################
        # Two input tensors: one per siamese tower; inputs are 72x72 single-channel.
        self.inputTensor1 = T.tensor.matrix("input1").reshape((batchsize,1,72,72))
        self.inputTensor2 = T.tensor.matrix("input2").reshape((batchsize,1,72,72))
        self.rng = np.random.mtrand.RandomState(self.seed) # create random number generator
        self.n_updates = 0 # To keep track of the number of updates, to decrease the learning rate
        targets = T.tensor.ivector('target')
        # One (nkerns, in_channels, h, w) filter shape per conv layer.
        filter_shapes = [(self.sizes[0], 1, 3, 3), (self.sizes[1], self.sizes[0], 4, 4),
                         (self.sizes[2], self.sizes[1], 5, 5), (self.sizes[3], self.sizes[2], 6, 6)]
        # Expected (batch, channels, h, w) image shapes feeding each conv layer.
        formattedSizes = [(batchsize,1,72,72),
                          (batchsize, self.sizes[0], 35, 35),
                          (batchsize, self.sizes[1], 16, 16),
                          (batchsize, self.sizes[2], 6, 6)]
        # Tower 1: alternating conv (even i, creates fresh params) and pooling
        # (odd i) layers. NOTE: `i/2` relies on Python 2 integer division.
        nnet1 = []
        for i in range(7):
            if i % 2 != 0:
                nnet1.append(self.createPoolingLayer(nnet1[-1], (2, 2), filter_shapes[i/2]))
            else:
                if i == 0:
                    nnet1.append(abs(self.createConvolutionLayer(self.inputTensor1, filter_shapes[i/2], formattedSizes[i/2])))
                else:
                    nnet1.append(abs(self.createConvolutionLayer(nnet1[-1], filter_shapes[i/2], formattedSizes[i/2])))
        # Tower 2: identical structure but reuses tower 1's parameters (weight sharing).
        nnet2 = []
        for i in range(7):
            if i % 2 != 0:
                nnet2.append(self.createPoolingLayerUsingParams(nnet2[-1], (2, 2), self.params[i]))
            else:
                if i == 0:
                    nnet2.append(abs(self.createConvolutionLayerUsingParams(self.inputTensor2, filter_shapes[i/2], formattedSizes[i/2], self.params[i])))
                else:
                    nnet2.append(abs(self.createConvolutionLayerUsingParams(nnet2[-1], filter_shapes[i/2], formattedSizes[i/2],self.params[i])))
        # Softmax output heads; [0] creates the parameters, [1] shares them.
        output_layer = [self.createSigmoidLayer(nnet1[-1].flatten(2), self.sizes[-1], 1),
                        self.createSigmoidLayerUsingParams(nnet2[-1].flatten(2), self.params[-2],self.params[-1])]
        numberOfValuesLastLayer = (filter_shapes[3][0] * np.prod(filter_shapes[3][2:]))
        # Phase costs: supervised NLL / similar-pair pull / dissimilar-pair hinge.
        cost_FirstPhase = self.training_loss(output_layer[0], targets)
        cost_SecondPhase = self.similarLossFunction([nnet1[-1],nnet2[-1]], batchsize, numberOfValuesLastLayer)
        cost_ThirdPhase = self.dissimilarLossFunction([nnet1[-1],nnet2[-1]], batchsize, numberOfValuesLastLayer)
        grads_FirstPhase = T.tensor.grad(cost_FirstPhase, self.params)
        #We stop before the last layer, being the output layer, hence the self.params[:-2] (W and B)
        grads_SecondPhase = T.tensor.grad(cost_SecondPhase, self.params[:-2])
        grads_ThirdPhase = T.tensor.grad(cost_ThirdPhase, self.params[:-2])
        # Shared update counter drives the learning-rate decay in update_param().
        n_updates = T.shared(np.cast[T.config.floatX](0))
        updates_FirstPhase = [self.update_param(param_i, grad_i, n_updates, self.lrFirstPhase) for param_i, grad_i in zip(self.params, grads_FirstPhase)]
        updates_FirstPhase += [(n_updates, n_updates + 1.)]
        updates_SecondPhase = [self.update_param(param_i, grad_i, n_updates, self.lrSecondPhase) for param_i, grad_i in zip(self.params[:-2], grads_SecondPhase)]
        updates_SecondPhase += [(n_updates, n_updates + 1.)]
        updates_ThirdPhase = [self.update_param(param_i, grad_i, n_updates, self.lrSecondPhase) for param_i, grad_i in zip(self.params[:-2], grads_ThirdPhase)]
        updates_ThirdPhase += [(n_updates, n_updates + 1.)]
        # Compile one training function per phase.
        self.train_batch[self.FIRST_PHASE] = T.function([self.inputTensor1, targets], cost_FirstPhase, updates=updates_FirstPhase,
                                       allow_input_downcast=True)
        self.train_batch[self.SECOND_PHASE] = T.function([self.inputTensor1, self.inputTensor2], cost_SecondPhase, updates=updates_SecondPhase,
                                       allow_input_downcast=True)
        self.train_batch[self.THIRD_PHASE] = T.function([self.inputTensor1, self.inputTensor2], cost_ThirdPhase, updates=updates_ThirdPhase,
                                       allow_input_downcast=True)
        # Inference path goes through tower 2 (shared parameters).
        testLayer = output_layer[1]
        inputTensorToSelect = self.inputTensor2
        nll = -T.tensor.log(testLayer)#-T.tensor.mean(T.tensor.log(output_layer))
        self.cost_function = T.function([inputTensorToSelect], nll, allow_input_downcast=True)
        self.pred_y = T.function([inputTensorToSelect], T.tensor.argmax(testLayer, axis=1),
                                 allow_input_downcast=True)
        self.theano_fprop = T.function([inputTensorToSelect], testLayer,allow_input_downcast=True)
    def similarLossFunction(self, layersOfInterest,batchSize, layerSizes):
        # L1 distance between the towers' top features, normalized by feature count.
        return (layersOfInterest[0] - layersOfInterest[1]).norm(1) /layerSizes
    def dissimilarLossFunction(self, layersOfInterest, batchSize, layerSizes):
        # Hinge: penalize dissimilar pairs only while closer than deltaDistance.
        return T.tensor.max((0, self.deltaDistance - ((layersOfInterest[0] - layersOfInterest[1]).norm(1) / layerSizes)))#/ batchSize / layerSizes))
    def update_param(self, param_i, grad_i, n_updates, lr):
        # SGD step with 1/(1 + n*dc) learning-rate decay.
        return param_i, param_i - grad_i * (lr / (1. + (n_updates * self.dc)))
    def createConvolutionLayer(self, input, filter_shape, image_shape):
        """Build a conv layer with freshly initialized weights (appended to self.params)."""
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        # pooling size
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]))
        # initialize weights with random weights
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        W = T.shared(
            np.asarray(
                self.rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=T.config.floatX
            ),
            borrow=True
        )
        conv_out = conv.conv2d(input=input,
                               filters=W,
                               filter_shape=filter_shape,
                               image_shape=image_shape
                               )
        self.params.append(W)
        return conv_out
    def createConvolutionLayerUsingParams(self, input, filter_shape, image_shape, W):
        """Build a conv layer reusing an existing weight tensor (weight sharing)."""
        return conv.conv2d(input=input,
                           filters=W,
                           filter_shape=filter_shape,
                           image_shape=image_shape
                           )
    def createPoolingLayer(self, input, poolsize, prev_filter_shape):
        """Max-pool + per-map bias + tanh; creates a fresh bias (appended to self.params)."""
        pool_out = downsample.max_pool_2d(input=input, ds=poolsize, ignore_border=True)
        b_values = np.zeros((prev_filter_shape[0],), dtype=T.config.floatX)
        b = T.shared(value=b_values, borrow=True)
        self.params.append(b)
        return T.tensor.tanh(pool_out + b.dimshuffle('x', 0, 'x', 'x'))
    def createPoolingLayerUsingParams(self, input, poolsize, b):
        """Max-pool + tanh reusing an existing bias (weight sharing)."""
        pool_out = downsample.max_pool_2d(input=input, ds=poolsize, ignore_border=True)
        return T.tensor.tanh(pool_out + b.dimshuffle('x', 0, 'x', 'x'))
    def createSigmoidLayer(self, input, nkerns, img_size):
        """Softmax output layer with zero-initialized W/b (appended to self.params)."""
        W = T.shared(
            value=np.zeros(
                (nkerns*img_size, self.n_classes),
                dtype=T.config.floatX
            ),
            name='sigmoid W',
            borrow=True
        )
        b = T.shared(
            value = np.zeros((self.n_classes,), dtype=T.config.floatX),
            name = 'sigmoid b',
            borrow=True
        )
        self.params.append(W)
        self.params.append(b)
        return T.tensor.nnet.softmax(T.tensor.dot(input, W) + b)
    def createSigmoidLayerUsingParams(self, input, W, b):
        """Softmax output layer reusing existing W/b (weight sharing)."""
        return T.tensor.nnet.softmax(T.tensor.dot(input, W) + b)
    def train(self,trainset):
        """
        Trains the neural network until it reaches a total number of
        training epochs of ``self.n_epochs`` since it was
        initialize. (DONE)
        Field ``self.epoch`` keeps track of the number of training
        epochs since initialization, so training continues until
        ``self.epoch == self.n_epochs``.
        If ``self.epoch == 0``, first initialize the model.
        """
        batchsize = trainset.metadata['minibatch_size']
        if self.epoch == 0:
            input_size = trainset.metadata['input_size']
            n_classes = len(trainset.metadata['targets'])
            print "initialize ..."
            self.initialize(input_size, n_classes, batchsize)
            print "done"
        for it in range(self.epoch,self.n_epochs):
            for input, target in trainset:
                # Sample frame pairs for the coherence phases from the dataset
                # object — presumably returns (frames_a, frames_b); TODO confirm.
                consecutivesFrames = trainset.data.getConsecutivesFrames(batchsize)
                nonConsecutivesFrames = trainset.data.getNonConsecutivesFrames(batchsize)
                firstScore = self.train_batch[self.FIRST_PHASE](input.reshape(batchsize,1,72,72), target)
                secondScore = self.train_batch[self.SECOND_PHASE](consecutivesFrames[0].reshape(batchsize,1,72,72), consecutivesFrames[1].reshape(batchsize,1,72,72))
                thirdScore = self.train_batch[self.THIRD_PHASE](nonConsecutivesFrames[0].reshape(batchsize,1,72,72),nonConsecutivesFrames[1].reshape(batchsize,1,72,72))
                print(firstScore,secondScore,thirdScore)
                self.n_updates += 1
        self.epoch = self.n_epochs
    def training_loss(self,output,target):
        """
        Returns the negative log likelyhood (NLL) for a given minibatch
        :param output: Theano tensor representing the output function of the preceding layer
        :param target: Vector that gives for each example the correct label
        :return: nll : Theano tensor representing the NLL
        """
        return -T.tensor.mean(T.tensor.log(output)[T.tensor.arange(target.shape[0]), target])
    def use(self,dataset):
        """
        Computes and returns the outputs of the Learner for
        ``dataset``:
        - the outputs should be a Numpy 2D array of size
          len(dataset) by (nb of classes + 1)
        - the ith row of the array contains the outputs for the ith example
        - the outputs for each example should contain
          the predicted class (first element) and the
          output probabilities for each class (following elements)
        """
        outputs = np.zeros((len(dataset)*dataset.metadata['minibatch_size'], self.n_classes+1))
        t=0
        for input,target in dataset:
            input = input.reshape(dataset.metadata['minibatch_size'],1,72,72)
            preds = self.pred_y(input)
            nlls = self.cost_function(input)
            for output, nll in zip(preds, nlls):
                outputs[t,0] = output
                outputs[t,1:] = nll
                t += 1
            # outputs[t,0] = self.pred_y(input)
            # outputs[t,1:] = self.cost_function(input)#self.theano_fprop(input)
            # t += 1
        return outputs
    def forget(self):
        # Intentionally a no-op: this learner does not support resetting.
        pass
    def test(self,dataset):
        """
        Computes and returns the outputs of the Learner as well as the errors of
        those outputs for ``dataset``:
        - the errors should be a Numpy 2D array of size
          len(dataset) by 2
        - the ith row of the array contains the errors for the ith example
        - the errors for each example should contain
          the 0/1 classification error (first element) and the
          regularized negative log-likelihood (second element)
        """
        outputs = self.use(dataset)
        errors = np.zeros((len(dataset)*dataset.metadata['minibatch_size'], 2))
        t=0
        for input,targets in dataset:
            for target in targets:
                output = outputs[t,:]
                errors[t,0] = output[0] != target
                errors[t,1] = output[int(target) + 1]#self.training_loss(output[1:],target)
                t+=1
        return outputs, errors
| {
"content_hash": "6b8475370a83ccaecba0357633103c6a",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 168,
"avg_line_length": 42.72929936305732,
"alnum_prop": 0.5973019303868227,
"repo_name": "martarek/temporalcoherence",
"id": "fc9e13c4e3c92653320d401547c4739b6293e8d0",
"size": "13417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temporal_nnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38135"
},
{
"name": "TeX",
"bytes": "38171"
}
],
"symlink_target": ""
} |
# DrawBot example: draw seven pages and demonstrate the two saveImage modes.
for i in range(7):
    # define the page size
    newPage(150, 100)
    # draw a random background
    fill(random(), random(), random())
    rect(10, 10, width()-20, height()-20)
    # set a fill
    fill(1)
    # draw some text
    text("Hello World %s!" % (i+1), (20, 40))
    # while drawing the fourth page (i == 3), save a single-page pdf
    # (multipage=False — presumably keeps only one page; confirm with drawbot docs)
    if i == 3:
        saveImage(["~/Desktop/firstImage.pdf"], multipage=False)
# save each page as a separate png (multipage=True numbers the output files)
saveImage(["~/Desktop/firstImage.png"], multipage=True)
| {
"content_hash": "98718ea6dd79fc9d107e7ed538bb67ce",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 23,
"alnum_prop": 0.5942028985507246,
"repo_name": "bitforks/drawbot",
"id": "07399e0543b0737f5e3e2e0fa6cdeb7c4039153c",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/saveImage2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "259643"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import os
from util.message import EN, Msg
# TODO: use a const DIR_DELIM evaluate once, instead of evaluate each time
def get_delim():
    """Return the path delimiter for the current OS: '\\' on Windows, '/' on
    Darwin, Linux, and any other (assumed unix-like) system."""
    import platform
    return "\\" if "Windows" in platform.system() else "/"
NA = -1  # sentinel meaning "not available"
# Root directory for reminder's data files: $HOME/reminder/ ('' HOME fallback).
__DATA_HOME = (os.environ['HOME'] if 'HOME' in os.environ else '') + get_delim() + 'reminder' + get_delim()
__SLIDESHOW_FREQUENCY = 30  # the frequency in second to have slideshow
__PHRASE_APPEAR_RATIO = 50  # a fixed percentage ratio (0-100) to show phrase
__SEARCH_LATENCY = 1  # seconds to wait between searches (must stay >= 1)
__LANG = EN  # current message language (see util.message)
__API_KEY = ''  # search API key; '' means unset (get_api_key returns None)
__CX = ''  # custom-search engine id; '' means unset (get_cx returns None)
__FULLSCREEN_MODE2 = False  # alternate fullscreen-mode flag
def set_fullscreen_mode2(fullscreen_mode2):
    """Store the alternate fullscreen-mode flag (module-level setting)."""
    global __FULLSCREEN_MODE2
    __FULLSCREEN_MODE2 = fullscreen_mode2
def get_fullscreen_mode2():
    """Return the alternate fullscreen-mode flag."""
    return __FULLSCREEN_MODE2
def set_lang(lang):
    """Select the language used for user-facing messages."""
    global __LANG
    __LANG = lang
def get_lang():
    """Return the currently selected message language."""
    return __LANG
def get_msg(msg_id):
    """Look up message `msg_id` in the currently selected language."""
    return Msg.get(__LANG, msg_id)
def set_api_key(api_key):
    """Store the search API key ('' means unset)."""
    global __API_KEY
    __API_KEY = api_key
def set_cx(cx):
    """Store the custom-search engine id ('' means unset)."""
    global __CX
    __CX = cx
def get_api_key():
    """Return the API key, or None when it is unset (empty string)."""
    return None if "" == __API_KEY else __API_KEY
def get_cx():
    """Return the custom-search engine id, or None when unset."""
    return None if "" == __CX else __CX
def set_search_latency(latency):
    """Set the wait (seconds, must be >= 1) between successive searches."""
    assert latency >= 1
    global __SEARCH_LATENCY
    __SEARCH_LATENCY = latency
def get_search_latency():
    """Return the configured search latency in seconds."""
    return __SEARCH_LATENCY
def get_slideshow_frequency():
    """Return the slideshow refresh period in seconds."""
    return __SLIDESHOW_FREQUENCY
def set_slideshow_frequency(slideshow_frequency):
    """Set the slideshow refresh period; must be strictly positive."""
    assert slideshow_frequency > 0
    global __SLIDESHOW_FREQUENCY
    __SLIDESHOW_FREQUENCY = slideshow_frequency
def set_phrase_appear_ratio(ratio):
    """Set the percentage chance (0-100 inclusive) that a phrase is shown."""
    assert 0 <= ratio <= 100
    global __PHRASE_APPEAR_RATIO
    __PHRASE_APPEAR_RATIO = ratio
def get_phrase_appear_ratio():
    """Return the phrase-appearance percentage."""
    return __PHRASE_APPEAR_RATIO
def set_data_home(home):
    """Set the data directory, guaranteeing it ends with the OS path delimiter."""
    global __DATA_HOME
    __DATA_HOME = home
    assert len(__DATA_HOME) > 0
    delim = get_delim()
    if not __DATA_HOME.endswith(delim):
        __DATA_HOME += delim
def get_data_home():
    """Return the data directory (always delimiter-terminated)."""
    return __DATA_HOME
def get_user_config_file():
    """Return the path of the user's config.ini inside the data home."""
    return __DATA_HOME + "config.ini"
def config_action():
    """Run Config.set_general_setting() against the user's config file
    (behavior defined in util.config)."""
    config_file = get_user_config_file()
    if config_file:
        # imported lazily to avoid a module-level dependency cycle — TODO confirm
        from util.config import Config
        Config(config_file).set_general_setting()
class CustomPrint(object):
    """Console printer: show() obeys a verbose flag, while info()/error()
    always print, prefixed with a localized tag."""
    def __init__(self, verbose):
        self.verbose = verbose
    def set_verbose(self, verbose):
        """Toggle whether show() produces output."""
        self.verbose = verbose
    def show(self, *msg):
        """Print msg only when verbose mode is enabled."""
        if not self.verbose:
            return
        print(*msg)
    # noinspection PyMethodMayBeStatic
    def info(self, *msg):
        """Print msg with a localized information tag."""
        print("[" + get_msg(Msg.information) + "]", *msg)
    # noinspection PyMethodMayBeStatic
    def error(self, *msg):
        """Print msg with a localized error tag."""
        print("[" + get_msg(Msg.error) + "]", *msg)
# Single module-wide printer instance; verbosity is off by default.
__OUT = CustomPrint(False)
def set_verbose(verbose):
    """Enable/disable verbose output for the module-wide printer."""
    global __OUT
    __OUT.set_verbose(verbose)
def get_verbose():
    """Return whether verbose output is currently enabled."""
    return __OUT.verbose
def show(*msg):
    """Print msg only when verbose mode is on."""
    __OUT.show(*msg)
def info(*msg):
    """Print msg with a localized information tag (always shown)."""
    __OUT.info(*msg)
def error(*msg):
    """Print msg with a localized error tag (always shown)."""
    __OUT.error(*msg)
| {
"content_hash": "194643f4a07aeeb15fc931bd31218a18",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 107,
"avg_line_length": 19.509090909090908,
"alnum_prop": 0.6281453867660765,
"repo_name": "r-kan/reminder",
"id": "49085093214d43b8942f63e514f42b9b8024135f",
"size": "3266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/global_def.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91224"
}
],
"symlink_target": ""
} |
"""
Agents (used for dumping data) and Teachers (for training models) related to the TOD
conversation setup.
As a convention, agents and teachers that are inheritable are prefixed with "Tod"
whereas those that can be used as-is are not. Similarly, classes and functions that do
not need to be exposed outside of this file are prefixed with a single underscore ('_')
"""
from parlai.core.agents import Agent
from parlai.core.message import Message
from parlai.core.metrics import AverageMetric
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.teachers import DialogTeacher
from parlai.utils.data import DatatypeHelper
from parlai.utils.distributed import is_distributed, get_rank, num_workers
import parlai.core.tod.tod_core as tod
from parlai.core.tod.tod_core import SerializationHelpers
from parlai.core.tod.teacher_metrics import SlotMetrics, NlgMetrics
from typing import Optional, List
import json
import pickle
import difflib
import random
from math import ceil
######### Agents that dump information from a dataset; base classes
class TodStructuredDataParser(Agent):
    """
    Base class that specifies intermediate representations for Tod conversations.
    Inherit from this class and implement `setup_episodes()` to implement the intermediate representation for a specific dataset. Use multiple inheritance with classes that implement an `act()` below to use.
    For example, if we have a `MyDataset_DataParser(TodStructuredDataParser)` and wanted to make a teacher to train a model to generate User Utterances based on a goal prompt, we would do so by defining `class MyDatasetUserSimulatorTeacher(MyDataset_DataParser, TodUserSimulatorTeacher)`.
    """
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Cooperative multiple inheritance: let mixed-in classes register
        # their own flags first when they define add_cmdline_args.
        if hasattr(super(), "add_cmdline_args"):
            parser = super().add_cmdline_args(parser, partial_opt)
        group = parser.add_argument_group("TOD StructuredData agent")
        group.add_argument(
            "--episodes-randomization-seed",
            type=int,
            default=-1,
            help="Randomize episodes in a predictable way (eg, for few shot). Set to -1 for no randomization. ",
        )
        parser.add_argument(
            "--n-shot",
            default=-1,
            type=int,
            help="Number of dialogues to keep for each of train/valid/test. -1 means all. Dialogues of lower numbers are strict subsets of larger numbers. Do not use in conjunction with `--percent-shot`. Use `--episodes-randomization-seed` to change seed. NOTE: Beware of using this flag when multitasking as this will apply to *all* datasets unless the ':' syntax for specifying per-dataset flags is used.",
        )
        parser.add_argument(
            "--percent-shot",
            default=-1,
            type=float,
            help="Percentage of dialogues to keep for each of train/valid/test. -1 means all. Dialogues of lower numbers are strict subsets of larger numbers. Do not use in conjunction with `--n-shot`. Use `--episodes-randomization-seed` to change seed. NOTE: Beware of using this flag when multitasking as this will apply to *all* datasets unless the ':' syntax for specifying per-dataset flags is used.",
        )
        return parser
    def __init__(self, opt: Opt, shared=None):
        super().__init__(opt, shared)
        self.id = self.get_id_task_prefix() + "_" + self._get_agent_type_suffix()
        # Episodes are generated once, then reused by shared worker copies.
        if shared is None:
            self.episodes = self.generate_episodes()
        else:
            self.episodes = shared["episodes"]
    def share(self):
        share = super().share()
        share["episodes"] = self.episodes
        return share
    def setup_episodes(self, fold: str) -> List[tod.TodStructuredEpisode]:
        """
        Fold here is a data fold.
        """
        raise NotImplementedError(
            "Must have method for generating an episode. Must be set in downstream Parser for a given task"
        )
    def generate_episodes(self) -> List[tod.TodStructuredEpisode]:
        """Build the episode list, applying optional shuffling and n-shot /
        percent-shot subsetting from the command-line options."""
        if self.opt.get("n_shot", -1) >= 0 and self.opt.get("percent_shot", -1) >= 0:
            # Validate before spending a while to load everything
            raise RuntimeError("Both `--n-shot` and `--percent-shot` in use!")
        episodes = list(self.setup_episodes(self.fold))
        if self.opt.get("episodes_randomization_seed", -1) != -1:
            random.Random(self.opt["episodes_randomization_seed"]).shuffle(episodes)
        # Subsetting happens after the (seeded) shuffle, so smaller shots are
        # strict prefixes of larger ones for the same seed.
        if self.opt.get("n_shot", -1) != -1:
            episodes = episodes[: self.opt["n_shot"]]
        elif self.opt.get("percent_shot", -1) >= 0:
            episodes = episodes[: int(len(episodes) * self.opt["percent_shot"])]
        return episodes
    def get_id_task_prefix(self) -> str:
        """
        Convenience for setting IDs.
        """
        raise NotImplementedError(
            "Must set ID prefix in downstream task agent. Must be set in downsream Parser for a given task"
        )
    def _get_agent_type_suffix(self) -> str:
        """
        Convenience for setting IDs.
        """
        raise NotImplementedError(
            "Must set in downstream agent within `tod_agents`. If you see this error, something is wrong with TOD Infrastructure"
        )
######### Agents that dump information from a dataset as gold (explicitly should *not* be used with teachers)
class _TodDataDumpAgent(TodStructuredDataParser):
    """
    For agents which dump data from some dataset, without training/other modifications.
    Since we have to deal with batching inside of agents (as per ParlAI convention for
    non-generative agents), this does so while also implementing an "epoch done" to
    denote elements in a batch that are past the end of the epoch.
    """
    def __init__(self, opt: Opt, shared=None):
        # `fold` must exist before super().__init__() runs, because the parent
        # constructor calls generate_episodes(), which reads `self.fold`.
        # Subclasses may pre-set it; otherwise derive it from the datatype.
        if not hasattr(self, "fold"):
            self.fold = DatatypeHelper.fold(opt["datatype"])
        super().__init__(opt, shared)
        self.epochDone = False
        self.batchsize = opt.get("batchsize", 1)
        # Upper bound on episode indices this agent instance will serve.
        self.max_episodes = len(self.episodes)
        if opt.get("num_episodes", 0) > 0:
            self.max_episodes = min(self.max_episodes, opt.get("num_episodes"))
        # Each batch slot starts at its own batch index and advances by
        # `batchsize` on every reset(), so slots never serve the same episode.
        self.episode_idx = opt.get("batchindex", 0)
        self._setup_next_episode()
        self.round_idx = 0  # for some downstream utt + sysUttAndApiCallAgents.
        if is_distributed():  # cause gotta manually handle
            # Partition the episode range into contiguous per-worker chunks:
            # this worker serves [rank * chunk_size, (rank + 1) * chunk_size).
            rank = get_rank()
            chunk_size = ceil(self.max_episodes / num_workers())
            self.episode_idx += rank * chunk_size
            self.max_episodes = min(self.max_episodes, (rank + 1) * chunk_size)
    def _setup_next_episode(self):
        # Past the end of this worker's range => this batch slot is epoch-done
        # and `self.episode` is left as None.
        self.epochDone = self.episode_idx >= self.max_episodes
        self.episode = None
        if not self.epochDone:
            self.episode = self.episodes[self.episode_idx]
        self.round_idx = (
            0  # so downstream agents know which round they are in. Update in `act()`
        )
    def epoch_done(self) -> bool:
        return self.epochDone
    def episode_done(self) -> bool:
        # Subclasses decide when a single conversation ends.
        raise RuntimeError("Must be defined in downstream agent")
    def num_episodes(self) -> int:
        return len(self.episodes)
    def reset(self):
        # Skip ahead one full batch so parallel batch slots stay disjoint.
        self.episode_idx += self.batchsize
        self._setup_next_episode()
class TodGoalAgent(_TodDataDumpAgent):
    """
    Grounding agent that emits the goal API calls of the current episode.

    Mix in with a dataset parser class that provides `generate_episodes()` from
    TodStructuredDataParser.
    """

    def act(self):
        # Same goal grounding message for every turn of the episode.
        message = {
            "text": f"{tod.STANDARD_GOAL}{self.episode.goal_calls_utt}",
            "id": self.id,
            "domain": self.episode.domain,
            "episode_done": False,
        }
        return message

    def episode_done(self) -> bool:
        # Only "done" once this batch slot has exhausted its episodes; the
        # conversation itself is never ended by this agent.
        return self.epoch_done()

    def _get_agent_type_suffix(self):
        return "Goal"
class TodApiSchemaAgent(_TodDataDumpAgent):
    """
    Grounding agent that emits the API schemas of the current episode, as set
    manually in the dataset parser.

    Mix in with a dataset parser class that provides `generate_episodes()` from
    TodStructuredDataParser.
    """

    def act(self):
        # Same schema grounding message for every turn of the episode.
        message = {
            "text": f"{tod.STANDARD_API_SCHEMAS}{self.episode.api_schemas_utt}",
            "id": self.id,
            "domain": self.episode.domain,
            "episode_done": False,
        }
        return message

    def episode_done(self) -> bool:
        # Only "done" once this batch slot has exhausted its episodes; the
        # conversation itself is never ended by this agent.
        return self.epoch_done()

    def _get_agent_type_suffix(self):
        return "ApiSchema"
############# Single Goal + Api Schema Agent
class _EpisodeToSingleGoalProcessor(_TodDataDumpAgent):
    """
    Iterate through all of the goals of a dataset, one by one.
    Slightly different logic than the dump agent since how we count + setup examples for
    an episode are different
    Used as a mixin in the SingleGoal and SingleApiSchema agents below.
    This class exposes a `filter_goals()` function that can be overridden by downstream agents.
    """
    def __init__(self, opt: Opt, shared=None):
        super().__init__(opt, shared)
        self.epochDone = False
        if shared is None:
            # Replace the parsed episodes with one synthetic episode per goal call.
            self.episodes = self._setup_single_goal_episodes()
        else:
            # Handled fine in _TodDataDumpAgent
            pass
        # Recompute limits: the episode count changed above, so redo the caps
        # that _TodDataDumpAgent.__init__ already applied to the raw episodes.
        self.max_episodes = len(self.episodes)
        if opt.get("num_episodes", 0) > 0:
            self.max_episodes = min(self.max_episodes, opt.get("num_episodes"))
        if is_distributed():  # cause gotta manually handle
            rank = get_rank()
            chunk_size = ceil(self.max_episodes / num_workers())
            self.max_episodes = min(self.max_episodes, (rank + 1) * chunk_size)
        self._setup_next_episode()
    def _setup_single_goal_episodes(self) -> List[tod.TodStructuredEpisode]:
        """
        This function assumes that `self.setup_episodes()` has already been called
        prior.
        Based on the `__init__` order of this class, it should be done in
        `TodStructuredDataParser` by this point.
        """
        raw_episodes = self.episodes
        result = []
        for raw in raw_episodes:
            for call in self.filter_goals(raw.goal_calls_machine):
                # Pair each goal call with the schema whose API name matches;
                # fall back to an empty schema when none matches.
                schema = {}
                for cand in raw.api_schemas_machine:
                    if (
                        cand[tod.STANDARD_API_NAME_SLOT]
                        == call[tod.STANDARD_API_NAME_SLOT]
                    ):
                        schema = cand
                # Rounds intentionally empty: these episodes are grounding-only.
                result.append(
                    tod.TodStructuredEpisode(
                        domain=raw.domain,
                        api_schemas_machine=[schema],
                        goal_calls_machine=[call],
                        rounds=[],
                    )
                )
        return result
    def filter_goals(self, goals):
        """
        Some downstream agents may want to filter the goals.
        Override this if so.
        """
        return goals
class TodSingleGoalAgent(_EpisodeToSingleGoalProcessor, TodGoalAgent):
    """
    Use as a mixin with classes that also extend + implement TodStructuredDataParser.
    Takes goals of an episode and splits them into single versions. (That is, if an episode has 3 goal API calls, this makes it such that those 3 goal API calls become the grounding for 3 separate episodes.)
    NOTE: If an API schema agent is used, this *must* be used with `TodSingleApiSchemaAgent` since it will be nonsensical otherwise. Additionally, this agent will not function properly with UserUtt + SystemUttAndApiCall agent, since episodes will not align.
    """
    def _get_agent_type_suffix(self):
        return "SingleGoal"
class TodSingleApiSchemaAgent(_EpisodeToSingleGoalProcessor, TodApiSchemaAgent):
    """
    Use as a mixin with classes that also extend + implement TodStructuredDataParser.
    Takes the schema provided for an episode and filters these to match the single Goal provided by TodSingleGoalAgent.
    NOTE: Must be used with TodSingleGoalAgent since nonsensical otherwise. Additionally, this agent will not function properly with UserUtt + SystemUttAndApiCall agent, since episodes will not align.
    """
    def _get_agent_type_suffix(self):
        return "SingleApiSchema"
###### Agents used for calculating TOD World Metrics based on a dataset. See `tod_world_script` or `parlai/projects/tod_simulator/` for examples.
class TodUserUttAgent(_TodDataDumpAgent):
    """
    "User" agent for running TOD World script code over a dataset.

    Convenience agent so that a `.jsonl` dump (eg for ACUTE) or a TodWorld metrics
    report can be produced without extra code. Mix in with classes that also
    extend + implement TodStructuredDataParser.

    Only use with the model-model chat world, which stops on the '[DONE]'
    utterance; this agent may index out of bounds otherwise.
    """

    def act(self):
        current_round = self.episode.rounds[self.round_idx]
        self.round_idx += 1
        return {
            "text": f"{tod.STANDARD_USER_UTTERANCE}{current_round.user_utt}",
            "id": self.id,
            "domain": self.episode.domain,
            "episode_done": False,
        }

    def reset(self):
        super().reset()  # advances to the next episode for this batch slot
        self.round_idx = 0

    def episode_done(self) -> bool:
        return self.epoch_done() or self.round_idx >= len(self.episode.rounds)

    def _get_agent_type_suffix(self):
        return "User"
class TodApiCallAndSysUttAgent(_TodDataDumpAgent):
    """
    Use as a mixin with classes that also extend + implement TodStructuredDataParser.
    Agent provided as a convenience to run TOD World script code on a dataset without having to write too much code to do so. (Ex. for a quick way to dump data to a `.jsonl` file for generating data for ACUTE or to generate a report file of metrics from TodWorld script.)
    This class represents the System and will generate both API Calls and System Utterances.
    This class should only ever be used with the model-model chat world which will stop
    upon seeing the '[DONE]' utterance; may go out of bounds otherwise.
    """
    def __init__(self, opt: Opt, shared=None):
        # This class will have `act()` called on it twice per round — once for API call and once for NLG — so need to make sure we don't increment episode number (reset) prematurely; use the `already_reset` flag for this.
        self.already_reset = False
        # Alternates within a round: True -> emit the API call,
        # False -> emit the system utterance (and advance the round).
        self.api_call_turn = True
        super().__init__(opt, shared)
    def act(self):
        # Any act() means the conversation is live again; allow the next reset().
        self.already_reset = False
        # Grounding turn: when shown API schemas, reply with the schema prefix.
        if tod.STANDARD_API_SCHEMAS in self.observation.get("text", ""):
            return {
                "text": tod.STANDARD_API_SCHEMAS,  # Default convention for the first turn
                "id": self.id,
                "domain": self.episode.domain,
                "episode_done": False,
            }
        if self.api_call_turn:  # comes first, don't iterate round #
            result = {
                "text": f"{tod.STANDARD_CALL}{self.episode.rounds[self.round_idx].api_call_utt}",
                "id": self.id,
                "domain": self.episode.domain,
                "episode_done": False,
            }
            self.api_call_turn = False
        else:
            # Second act() of the round: emit the NLG response, then advance to
            # the next round and flip back to the API-call phase.
            result = {
                "text": f"{tod.STANDARD_SYSTEM_UTTERANCE}{self.episode.rounds[self.round_idx].sys_utt}",
                "id": self.id,
                "domain": self.episode.domain,
                "episode_done": False,
            }
            self.round_idx += 1
            self.api_call_turn = True
        return result
    def reset(self):
        # Guard against double-reset between acts (see __init__ note); only the
        # first reset() after an act() advances the episode.
        if not self.already_reset:
            super().reset()  # setup next episode
            self.api_call_turn = True
        self.already_reset = True
    def _get_agent_type_suffix(self):
        return "System"
    def episode_done(self) -> bool:
        return self.epoch_done() or self.round_idx >= len(self.episode.rounds)
class TodApiResponseAgent(_TodDataDumpAgent):
    """
    API-response agent for running TOD World script code over a dataset.

    Convenience agent so that a `.jsonl` dump (eg for ACUTE) or a TodWorld metrics
    report can be produced without extra code. Mix in with classes that also
    extend + implement TodStructuredDataParser.

    Only use with the model-model chat world, which stops on the '[DONE]'
    utterance; this agent may index out of bounds otherwise.
    """

    def act(self):
        observed = self.observation.get("text", "")
        if tod.STANDARD_API_SCHEMAS in observed:
            # Grounding turn: echo the schema prefix by convention.
            reply_text = tod.STANDARD_API_SCHEMAS
        else:
            current_round = self.episode.rounds[self.round_idx]
            self.round_idx += 1
            reply_text = f"{tod.STANDARD_RESP}{current_round.api_resp_utt}"
        return {
            "text": reply_text,
            "id": self.id,
            "domain": self.episode.domain,
            "episode_done": False,
        }

    def reset(self):
        super().reset()  # advances to the next episode for this batch slot
        self.round_idx = 0

    def episode_done(self) -> bool:
        return self.epoch_done() or self.round_idx >= len(self.episode.rounds)

    def _get_agent_type_suffix(self):
        return "ApiResponse"
###### Standalone API agent
class StandaloneApiAgent(Agent):
    """
    Trainable agent that saves API calls and responses.

    Use `TodStandaloneApiTeacher` to train this class. For example for a MultiWoz V2.2
    standalone API, use ``` parlai train -t multiwoz_v22:StandaloneApiTeacher -m
    parlai.core.tod.tod_agents:StandaloneApiAgent -eps 4 -mf output ``` to generate the
    `.pickle` file to use.
    """

    # Default reply for non-API turns and lookup misses: the bare response prefix.
    EMPTY_RESP = {
        "text": tod.STANDARD_RESP,
        "id": "StandaloneApiAgent",
        "episode_done": False,
    }

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        group = parser.add_argument_group("TOD Standalone API args")
        group.add_argument(
            "--exact-api-call",
            type=bool,
            default=True,
            help="Validation-time flag. If true, will return '' if exact api call values not found. If false, will pick response from the same intent with similar api parameters (assuming intent is the same when available)",
        )
        group.add_argument(
            "--fail-hard",
            type=bool,
            default=False,
            help="Aids in debugging. Will throw exception if API call not found and '--exact-api-call' is set.",
        )
        group.add_argument(
            "--standalone-api-file",
            type=str,
            default=None,
            help="Path to file holding `.pickle` of standalone api for validation (will intelligently strip if suffix included). If not set, assumes the `model_file` argument will contain the `.pickle` file. ",
        )
        return parser

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.id = "StandaloneApiAgent"
        # Prefer an explicit --standalone-api-file; otherwise derive the DB path
        # from the model file.
        file_key = "model_file"
        if self.opt["standalone_api_file"] is not None:
            file_key = "standalone_api_file"
        self.path_base = self.opt[file_key].replace(".pickle", "")
        self.db_path = self.path_base + ".pickle"
        self.exact_api_call = self.opt["exact_api_call"]
        try:
            with open(self.db_path, "rb") as openfile:
                self.data = pickle.load(openfile)
        except Exception:
            # No usable database on disk: start an empty one and train it up.
            print(f"No file at {self.db_path}; ASSUMING WE ARE TRAINING")
            self.data = {}
            self.data["exact_api_call"] = self.exact_api_call
            self.training = True
        else:
            # Fix: this validation used to live inside the `try`, so the
            # RuntimeError it raised was swallowed by `except Exception` and
            # silently treated as "no file". In the `else` clause it propagates.
            self.training = True
            print("Loaded Standalone API data successfully")
            if self.exact_api_call != self.data.get("exact_api_call", True):
                raise RuntimeError(
                    f"Standalone API .pickle file generated with `exact_api_call` of {self.data.get('exact_api_call', True)} but StandaloneApiAgent sets it to {self.exact_api_call}"
                )

    def _maybe_filter_prefix(self, text, prefix):
        """
        Strip the first `len(prefix)` characters when `prefix` occurs in `text`.

        NOTE(review): assumes `prefix` appears at the start of `text`; an
        occurrence elsewhere would truncate the wrong characters — confirm callers.
        """
        if prefix in text:
            return text[len(prefix) :].strip()
        return text.strip()

    def act(self):
        """Train on or answer the observed API call; no-op for other messages."""
        if not self.observation["text"].startswith(tod.STANDARD_CALL):
            return self.EMPTY_RESP
        call_text_raw = self.observation["text"]
        # decode then reencode the API call so that we get the API calls in a consistent order
        call_text = SerializationHelpers.api_dict_to_str(
            SerializationHelpers.str_to_api_dict(
                call_text_raw[len(tod.STANDARD_CALL) :]
            )
        )
        if "labels" in self.observation:
            return self._do_train(call_text)
        return self._do_fetch(call_text)

    def _do_train(self, call_text):
        """Record the gold response for this (normalized) call."""
        assert self.training is True
        self.data[call_text] = self.observation["labels"][0]
        return self.EMPTY_RESP

    def _do_fetch(self, call_text):
        """Look up the stored response for this call (exact or fuzzy match)."""
        if self.exact_api_call:
            if self.opt.get("fail_hard", False):
                # Deliberately raise KeyError on a miss to aid debugging.
                resp = self.data[call_text]
            else:
                resp = self.data.get(call_text, tod.STANDARD_RESP)
            return {"text": resp, "id": self.id, "episode_done": False}

        # Not exact case: fall back to the closest stored call string.
        best_key = difflib.get_close_matches(call_text, self.data.keys(), 1)
        if len(best_key) == 0:
            return self.EMPTY_RESP
        return {
            "text": self.data.get(best_key[0], tod.STANDARD_RESP),
            "id": self.id,
            "episode_done": False,
        }

    def shutdown(self):
        """After training, persist the call->response DB and the opts used."""
        if self.training:
            with open(self.db_path, "wb") as openfile:
                pickle.dump(self.data, openfile)
                print(f"Dumped output to {self.db_path}")
            with open(self.path_base + ".opt", "w") as f:
                json.dump(self.opt, f)
######### Empty agents
class EmptyApiSchemaAgent(Agent):
    """Agent that always emits a blank API-schema grounding message."""

    def __init__(self, opt, shared=None):
        super().__init__(opt)
        self.id = "EmptyApiSchemaAgent"

    def act(self):
        # Just the schema prefix: no schema content at all.
        return Message(
            {
                "id": self.getID(),
                "text": tod.STANDARD_API_SCHEMAS,
                "episode_done": False,
            }
        )
class EmptyGoalAgent(Agent):
    """Agent that always emits a blank goal grounding message."""

    def __init__(self, opt, shared=None):
        super().__init__(opt)
        self.id = "EmptyGoalAgent"

    def act(self):
        # Just the goal prefix: no goal content at all.
        return Message(
            {"id": self.getID(), "text": tod.STANDARD_GOAL, "episode_done": False}
        )
############# Teachers
class TodSystemTeacher(TodStructuredDataParser, DialogTeacher):
    """
    Use as a mixin with classes that also extend + implement TodStructuredDataParser.
    TOD agent teacher which produces both API calls and NLG responses.
    First turn is API Schema grounding, which may be an empty schema.
    Subsequent turns alternate between
    1. User utterance -> API Call
    2. API Response -> System Utterance
    """
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        parser = super().add_cmdline_args(parser, partial_opt)
        parser.add_argument(
            "--api-schemas",
            type="bool",
            default=False,
            help="Preempt first turn with intents + required/optional parameters as key/value for given domain",
        )
        # NOTE(review): the three flags below use `type=bool` while the flag above
        # uses ParlAI's string `type="bool"`. With plain argparse, `type=bool`
        # treats any non-empty string (including "False") as True — confirm
        # ParlaiParser normalizes this.
        parser.add_argument(
            "--api-jga-record",
            type=bool,
            default=True,
            help="Breaks out jga into individual api schemas",
        )
        parser.add_argument(
            "--domain-jga-record",
            type=bool,
            default=False,
            help="Breaks out jga into individual domains",
        )
        parser.add_argument(
            "--domain-nlg-record",
            type=bool,
            default=False,
            help="Breaks out nlg into individual domains",
        )
        return parser
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        # Manually cache counts: each round yields 2 examples (call + sys utt),
        # plus one schema-grounding example per episode.
        self._num_examples_cache = sum([len(x.rounds) * 2 + 1 for x in self.episodes])
        self._num_episodes_cache = len(self.episodes)
    def custom_evaluation(
        self, teacher_action: Message, labels, model_response: Message
    ):
        # Slot (JGA) metrics on API-call turns; NLG metrics on system-utterance turns.
        resp = model_response.get("text")
        if not resp:
            return
        if teacher_action["type"] == tod.STANDARD_CALL:
            # Strip the call prefix before parsing predicted slots.
            if resp.startswith(tod.STANDARD_CALL):
                resp = resp[len(tod.STANDARD_CALL) :]
            predicted = SerializationHelpers.str_to_api_dict(resp)
            # Optionally prefix metric keys with the domain.
            domains = (
                [teacher_action["domain"]] if self.opt["domain_jga_record"] else []
            )
            metrics = SlotMetrics(
                teacher_slots=teacher_action["slots"],
                predicted_slots=predicted,
                prefixes=domains,
            ).report()
            for key, value in metrics.items():
                self.metrics.add(key, value)
            if self.opt["api_jga_record"] and len(teacher_action["slots"]) > 0:
                # Also record JGA keyed by api name + its (non-name) slot set.
                teacher = teacher_action["slots"]
                slots = list(teacher.keys())
                slots.remove(tod.STANDARD_API_NAME_SLOT)
                api_here = (
                    "api-"
                    + teacher[tod.STANDARD_API_NAME_SLOT]
                    + "--"
                    + "-".join(slots)
                )
                self.metrics.add(f"{api_here}/jga", AverageMetric(teacher == predicted))
        elif teacher_action["type"] == tod.STANDARD_SYSTEM_UTTERANCE:
            domains = (
                [teacher_action["domain"]] if self.opt["domain_nlg_record"] else []
            )
            metrics = NlgMetrics(guess=resp, labels=labels, prefixes=domains).report()
            for key, value in metrics.items():
                self.metrics.add(key, value)
    def setup_data(self, fold):
        # One grounding example per episode, then two examples per round.
        for episode in self.generate_episodes():
            if self.opt.get("api_schemas"):
                schemas = episode.api_schemas_utt
            else:
                schemas = ""
            # Grounding turn (starts a new ParlAI episode: second tuple item True).
            yield {
                "text": f"{tod.STANDARD_API_SCHEMAS}{schemas}",
                "label": f"{tod.STANDARD_API_SCHEMAS}",
                "domain": episode.domain,
                "type": tod.STANDARD_API_SCHEMAS,
                "slots": {},
            }, True
            for r in episode.rounds:
                # User utterance -> API call (slots used for JGA metrics).
                yield {
                    "text": f"{tod.STANDARD_USER_UTTERANCE}{r.user_utt}",
                    "label": f"{tod.STANDARD_CALL}{r.api_call_utt}",
                    "domain": episode.domain,
                    "type": tod.STANDARD_CALL,
                    "slots": r.api_call_machine,
                }, False
                # API response -> system utterance (scored with NLG metrics).
                yield {
                    "text": f"{tod.STANDARD_RESP}{r.api_resp_utt}",
                    "label": f"{tod.STANDARD_SYSTEM_UTTERANCE}{r.sys_utt}",
                    "domain": episode.domain,
                    "slots": r.api_resp_machine,
                    "type": tod.STANDARD_SYSTEM_UTTERANCE,
                }, False
    def _get_agent_type_suffix(self):
        return "SystemTeacher"
class TodUserSimulatorTeacher(TodStructuredDataParser, DialogTeacher):
    """
    Use as a mixin with classes that also extend + implement TodStructuredDataParser.
    Teacher that has `Goal->User Utterance` for its first turn, then `System
    Utterance->User Utterance` for all subsequent turns.
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        # Manually set number of examples + number of episodes
        self._num_examples_cache = sum([len(x.rounds) for x in self.episodes])
        self._num_episodes_cache = len(self.episodes)
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        parser = super().add_cmdline_args(parser, partial_opt)
        parser.add_argument(
            "--api-schemas",
            type="bool",
            default=False,
            help="Preempt first turn with intents + required/optional parameters as key/value for given domain. NOOP for this teacher, but including to make sweeps easier",
        )
        return parser
    def setup_data(self, fold):
        for episode in self.generate_episodes():
            # Nothing to teach on episodes with no rounds.
            if len(episode.rounds) < 1:
                continue
            # First turn: goal grounding -> first user utterance.
            yield {
                "text": f"{tod.STANDARD_GOAL}{episode.goal_calls_utt}",
                "label": f"{tod.STANDARD_USER_UTTERANCE}{episode.rounds[0].user_utt}",
                "domain": episode.domain,
                "type": tod.STANDARD_USER_UTTERANCE,
            }, True
            # Subsequent turns: system utterance of round i -> user utterance of
            # round i+1; the final round has no following user utterance.
            for i, r in enumerate(episode.rounds):
                if i == len(episode.rounds) - 1:
                    continue
                yield {
                    "text": f"{tod.STANDARD_SYSTEM_UTTERANCE}{r.sys_utt}",
                    "label": f"{tod.STANDARD_USER_UTTERANCE}{episode.rounds[i+1].user_utt}",
                    "domain": episode.domain,
                    "type": tod.STANDARD_USER_UTTERANCE,
                    "slots": {},  # slots in agent/user turns are meaningless
                }, False
    def custom_evaluation(
        self, teacher_action: Message, labels, model_response: Message
    ):
        resp = model_response.get("text")
        if not resp:
            return
        # NOTE(review): this teacher's setup_data only yields actions of type
        # STANDARD_USER_UTTERANCE, so this STANDARD_RESP branch looks unreachable
        # for data produced here — confirm whether another caller relies on it.
        if teacher_action["type"] == tod.STANDARD_RESP:
            if resp.startswith(tod.STANDARD_RESP):
                resp = resp[len(tod.STANDARD_RESP) :]
            predicted = SerializationHelpers.str_to_api_dict(resp)
            metrics = SlotMetrics(teacher_action["slots"], predicted).report()
            for key, value in metrics.items():
                self.metrics.add(key, value)
        elif teacher_action["type"] == tod.STANDARD_USER_UTTERANCE:
            metrics = NlgMetrics(resp, labels).report()
            for key, value in metrics.items():
                self.metrics.add(key, value)
    def _get_agent_type_suffix(self):
        return "UserSimulatorTeacher"
class TodStandaloneApiTeacher(TodStructuredDataParser, DialogTeacher):
    """
    Teacher that generates a call->response database for `StandaloneApiAgent`.

    Mix in with classes that also extend + implement TodStructuredDataParser.
    Pair it with `StandaloneApiAgent` as the agent; eg for a MultiWoz V2.2
    standalone API, use ``` parlai train -t multiwoz_v22:StandaloneApiTeacher -m
    parlai.core.tod.tod_agents:StandaloneApiAgent -eps 4 -mf output ```
    """

    def setup_data(self, fold):
        # As a default, just put everything in: iterate every fold regardless of
        # the one requested.
        for fold_overwrite in ["train", "valid", "test"]:
            for episode in self.setup_episodes(fold_overwrite):
                new_episode = True
                for r in episode.rounds:
                    # Rounds with no API call contribute nothing to the DB.
                    if len(r.api_call_machine) == 0:
                        continue
                    yield {
                        "text": f"{tod.STANDARD_CALL}{r.api_call_utt}",
                        "label": f"{tod.STANDARD_RESP}{r.api_resp_utt}",
                        "id": self.id,
                        "domain": episode.domain,
                    }, new_episode
                    new_episode = False

    def _get_agent_type_suffix(self):
        return "StandaloneApiTeacher"
| {
"content_hash": "c2069bc9d60d53256fed9de40b34263b",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 408,
"avg_line_length": 39.27106227106227,
"alnum_prop": 0.5991667443957343,
"repo_name": "facebookresearch/ParlAI",
"id": "6fb15fb731a63a84b15bb6e2faec7b4cea922ee2",
"size": "32366",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/core/tod/tod_agents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ContainerServiceVMDiagnostics(Model):
    """Profile for diagnostics on the container service VMs.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param enabled: Whether the VM diagnostic agent is provisioned on the VM.
    :type enabled: bool
    :ivar storage_uri: The URI of the storage account where diagnostics are
    stored.
    :vartype storage_uri: str
    """
    # msrest validation rules: `enabled` is required; `storage_uri` is read-only
    # (server-populated, never serialized into requests).
    _validation = {
        'enabled': {'required': True},
        'storage_uri': {'readonly': True},
    }
    # Maps Python attribute names to wire-format keys and msrest type strings.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'storage_uri': {'key': 'storageUri', 'type': 'str'},
    }
    def __init__(self, enabled):
        # NOTE(review): does not call super().__init__() — confirm this matches
        # the msrest.serialization.Model conventions for this SDK version.
        self.enabled = enabled
        # Read-only; filled in by the server on responses.
        self.storage_uri = None
| {
"content_hash": "b4651639780a137575b214a4306f7215",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 28.448275862068964,
"alnum_prop": 0.6290909090909091,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "8db4c84f62fee42c7eabd5324b0fcb1dfa5bfaa4",
"size": "1299",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/containerservice/v2017_01_31/models/container_service_vm_diagnostics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
import io
from hashlib import sha256
from ...encoding.hash import double_sha256
from ...encoding.bytes32 import from_bytes_32
from ...intbytes import byte2int, indexbytes
from ..SolutionChecker import SolutionChecker, ScriptError
from pycoin.satoshi import errno
from pycoin.satoshi.satoshi_struct import stream_struct
from pycoin.satoshi.satoshi_string import stream_satoshi_string
from pycoin.satoshi.flags import (
SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ANYONECANPAY,
VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM, VERIFY_CLEANSTACK, VERIFY_WITNESS
)
from .ScriptTools import BitcoinScriptTools
# 32 zero bytes: stand-in hash written into the sighash preimage when a
# component (prevouts/sequence/outputs) is excluded by the hash type.
ZERO32 = b'\0' * 32
class SegwitChecker(SolutionChecker):
    """
    Mixin adding segregated-witness validation to a SolutionChecker, including
    BIP-143-style signature-hash computation for v0 witness programs.
    """
    # you must set VM
    # you must set ScriptTools
    # Script fragments used to expand a 20-byte (v0) witness program into the
    # standard pay-to-pubkey-hash style puzzle script.
    V0_len20_prefix = BitcoinScriptTools.compile("OP_DUP OP_HASH160")
    V0_len20_postfix = BitcoinScriptTools.compile("OP_EQUALVERIFY OP_CHECKSIG")
    OP_0 = BitcoinScriptTools.int_for_opcode("OP_0")
    OP_1 = BitcoinScriptTools.int_for_opcode("OP_1")
    OP_16 = BitcoinScriptTools.int_for_opcode("OP_16")
    def _make_witness_sighash_f(self, tx_in_idx):
        """Return a closure computing the segwit sighash for input `tx_in_idx`."""
        def witness_signature_for_hash_type(hash_type, sig_blobs, vm):
            return self._signature_for_hash_type_segwit(
                vm.script[vm.begin_code_hash:], tx_in_idx, hash_type)
        return witness_signature_for_hash_type
    def _puzzle_script_for_len20_segwit(self, witness_program):
        """Expand a 20-byte witness program into its implicit p2pkh-style script."""
        return self.V0_len20_prefix + self.ScriptTools.compile_push_data_list(
            [witness_program]) + self.V0_len20_postfix
    def _check_witness_program_v0(self, witness_solution_stack, witness_program):
        """
        Validate a version-0 witness program and return (stack, puzzle_script).

        32-byte program: last witness stack item must sha256 to the program
        (script-hash case); 20-byte program: witness must be exactly
        signature + pubkey (pubkey-hash case). Anything else is an error.
        """
        size = len(witness_program)
        if size == 32:
            if len(witness_solution_stack) == 0:
                raise ScriptError("witness program witness empty", errno.WITNESS_PROGRAM_WITNESS_EMPTY)
            # Last item is the actual script; the rest is its solution stack.
            puzzle_script = witness_solution_stack[-1]
            if sha256(puzzle_script).digest() != witness_program:
                raise ScriptError("witness program mismatch", errno.WITNESS_PROGRAM_MISMATCH)
            stack = list(witness_solution_stack[:-1])
        elif size == 20:
            # special case for pay-to-pubkeyhash; signature + pubkey in witness
            if len(witness_solution_stack) != 2:
                raise ScriptError("witness program mismatch", errno.WITNESS_PROGRAM_MISMATCH)
            puzzle_script = self._puzzle_script_for_len20_segwit(witness_program)
            stack = list(witness_solution_stack)
        else:
            raise ScriptError("witness program wrong length", errno.WITNESS_PROGRAM_WRONG_LENGTH)
        return stack, puzzle_script
    def _witness_program_version(self, script):
        """Return the witness version (0-16) if `script` is a witness program, else None."""
        size = len(script)
        if size < 4 or size > 42:
            return None
        first_opcode = byte2int(script)
        # Second byte must push exactly the remainder of the script.
        if indexbytes(script, 1) + 2 != size:
            return None
        if first_opcode == self.OP_0:
            return 0
        if self.OP_1 <= first_opcode <= self.OP_16:
            return first_opcode - self.OP_1 + 1
        return None
    def _hash_prevouts(self, hash_type):
        # hashPrevouts: double-sha256 over all input outpoints; zeroed for
        # ANYONECANPAY (each input then commits only to itself).
        if hash_type & SIGHASH_ANYONECANPAY:
            return ZERO32
        f = io.BytesIO()
        for tx_in in self.tx.txs_in:
            f.write(tx_in.previous_hash)
            stream_struct("L", f, tx_in.previous_index)
        return double_sha256(f.getvalue())
    def _hash_sequence(self, hash_type):
        # hashSequence: only committed to when signing ALL without ANYONECANPAY.
        if (
            (hash_type & SIGHASH_ANYONECANPAY) or
            ((hash_type & 0x1f) == SIGHASH_SINGLE) or
            ((hash_type & 0x1f) == SIGHASH_NONE)
        ):
            return ZERO32
        f = io.BytesIO()
        for tx_in in self.tx.txs_in:
            stream_struct("L", f, tx_in.sequence)
        return double_sha256(f.getvalue())
    def _hash_outputs(self, hash_type, tx_in_idx):
        # hashOutputs: all outputs normally; only the matching output for
        # SIGHASH_SINGLE; nothing for SIGHASH_NONE.
        txs_out = self.tx.txs_out
        if hash_type & 0x1f == SIGHASH_SINGLE:
            if tx_in_idx >= len(txs_out):
                return ZERO32
            txs_out = txs_out[tx_in_idx:tx_in_idx+1]
        elif hash_type & 0x1f == SIGHASH_NONE:
            return ZERO32
        f = io.BytesIO()
        for tx_out in txs_out:
            stream_struct("QS", f, tx_out.coin_value, tx_out.script)
        return double_sha256(f.getvalue())
    def _segwit_signature_preimage(self, script, tx_in_idx, hash_type):
        """Serialize the BIP-143-style signature preimage for one input."""
        f = io.BytesIO()
        stream_struct("L", f, self.tx.version)
        # calculate hash prevouts
        f.write(self._hash_prevouts(hash_type))
        f.write(self._hash_sequence(hash_type))
        tx_in = self.tx.txs_in[tx_in_idx]
        f.write(tx_in.previous_hash)
        stream_struct("L", f, tx_in.previous_index)
        # The signed amount comes from the spent output; requires tx.unspents.
        tx_out = self.tx.unspents[tx_in_idx]
        stream_satoshi_string(f, script)
        stream_struct("Q", f, tx_out.coin_value)
        stream_struct("L", f, tx_in.sequence)
        f.write(self._hash_outputs(hash_type, tx_in_idx))
        stream_struct("L", f, self.tx.lock_time)
        stream_struct("L", f, hash_type)
        return f.getvalue()
    def _signature_for_hash_type_segwit(self, script, tx_in_idx, hash_type):
        # Final sighash = double-sha256 of the preimage, interpreted as an integer.
        return from_bytes_32(double_sha256(self._segwit_signature_preimage(script, tx_in_idx, hash_type)))
    def witness_program_tuple(self, tx_context, puzzle_script, solution_stack, flags, is_p2sh):
        """
        If `puzzle_script` is a witness program, return the
        (puzzle_script, stack, flags, sighash_f) tuple to execute; return None
        when witness checking is off, the script is not a witness program, or
        the version is an (allowed) future upgrade.
        """
        if not flags & VERIFY_WITNESS:
            return
        witness_version = self._witness_program_version(puzzle_script)
        if witness_version is None:
            # Not a witness program: any witness data present is an error.
            if len(tx_context.witness_solution_stack) > 0:
                raise ScriptError("witness unexpected", errno.WITNESS_UNEXPECTED)
        else:
            # Strip the version opcode + push opcode to get the raw program.
            witness_program = puzzle_script[2:]
            if len(solution_stack) > 0:
                # scriptSig must be blank at this point; the error code differs
                # for the p2sh-wrapped case.
                err = errno.WITNESS_MALLEATED_P2SH if is_p2sh else errno.WITNESS_MALLEATED
                raise ScriptError("script sig is not blank on segwit input", err)
            for s in tx_context.witness_solution_stack:
                if len(s) > self.VM.MAX_BLOB_LENGTH:
                    raise ScriptError("pushing too much data onto stack", errno.PUSH_SIZE)
            if witness_version == 0:
                stack, puzzle_script = self._check_witness_program_v0(
                    tx_context.witness_solution_stack, witness_program)
                sighash_f = self._make_witness_sighash_f(tx_context.tx_in_idx)
                return puzzle_script, stack, flags | VERIFY_CLEANSTACK, sighash_f
            elif flags & VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM:
                raise ScriptError(
                    "this version witness program not yet supported", errno.DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM)
| {
"content_hash": "2085f09600836665b27c4b9748567403",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 114,
"avg_line_length": 41.15432098765432,
"alnum_prop": 0.6230688465576721,
"repo_name": "richardkiss/pycoin",
"id": "26a70b1ebd84ab26c7b454dfc51acbccfc35fc32",
"size": "6667",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pycoin/coins/bitcoin/SegwitChecker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "752865"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
from iris_sdk.models.maps.base_map import BaseMap
class LoasMap(BaseMap):
    """
    Attribute map for LOA file API responses (file listing + result status).

    NOTE(review): field semantics inferred from names — presumably LOA stands for
    Letter of Authorization in the IRIS API; confirm against the endpoint docs.
    """
    file_count = None
    file_data = None
    file_names = None
    result_code = None
result_message = None | {
"content_hash": "48c7d2e5ed3cdfe59ee635d05bec9f15",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 21,
"alnum_prop": 0.6825396825396826,
"repo_name": "bandwidthcom/python-bandwidth-iris",
"id": "2cd35ddbcec548ae28a00508800ee0c945a73d71",
"size": "212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iris_sdk/models/maps/loas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
} |
# Re-export the public API of each submodule at the package top level.
from .rcmod import *
from .utils import *
from .palettes import *
from .linearmodels import *
from .categorical import *
from .distributions import *
from .timeseries import *
from .matrix import *
from .miscplot import *
from .axisgrid import *
from .widgets import *
# Named color dictionaries, kept as explicit (non-star) imports.
from .xkcd_rgb import xkcd_rgb
from .crayons import crayons
# NOTE(review): this resolves to the package's own set() brought in by the
# star imports above (not the builtin set) -- presumably applies the default
# plotting aesthetics on import; confirm against rcmod.
set()
__version__ = "0.8.dev"
| {
"content_hash": "ea69f05c7e46e10bb70e0a56bc68982d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 30,
"avg_line_length": 22.5,
"alnum_prop": 0.7388888888888889,
"repo_name": "oesteban/seaborn",
"id": "e4881d378eec51513c9758908ca1e3e3d60df15d",
"size": "360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "seaborn/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "696473"
}
],
"symlink_target": ""
} |
"""Tests for IPython.core.application"""
import os
import tempfile
import nose.tools as nt
from traitlets import Unicode
from IPython.core.application import BaseIPythonApplication
from IPython.testing import decorators as dec
from IPython.utils.tempdir import TemporaryDirectory
@dec.onlyif_unicode_paths
def test_unicode_cwd():
    """IPython should start cleanly when the CWD contains non-ASCII chars."""
    workdir = tempfile.mkdtemp(suffix=u"€")
    saved_cwd = os.getcwd()
    os.chdir(workdir)
    try:
        app = BaseIPythonApplication()
        # Mirror the relevant steps of Application.initialize()
        app.init_profile_dir()
        app.init_config_files()
        app.load_config_file(suppress_errors=False)
    finally:
        os.chdir(saved_cwd)
@dec.onlyif_unicode_paths
def test_unicode_ipdir():
    """Check that IPython starts with non-ascii characters in the IP dir."""
    ipdir = tempfile.mkdtemp(suffix=u"€")
    # Create the config file, so it tries to load it.
    with open(os.path.join(ipdir, 'ipython_config.py'), "w") as f:
        pass
    # Stash both env vars that can point at the IPython dir, then point
    # IPYTHONDIR at the temporary unicode path.
    old_ipdir1 = os.environ.pop("IPYTHONDIR", None)
    old_ipdir2 = os.environ.pop("IPYTHON_DIR", None)
    os.environ["IPYTHONDIR"] = ipdir
    try:
        app = BaseIPythonApplication()
        # The lines below are copied from Application.initialize()
        app.init_profile_dir()
        app.init_config_files()
        app.load_config_file(suppress_errors=False)
    finally:
        # Restore the environment exactly as found. The original code wrote
        # old_ipdir2 back into "IPYTHONDIR" (clobbering old_ipdir1's value)
        # and leaked the temporary IPYTHONDIR when neither var was set.
        if old_ipdir1 is not None:
            os.environ["IPYTHONDIR"] = old_ipdir1
        else:
            os.environ.pop("IPYTHONDIR", None)
        if old_ipdir2 is not None:
            os.environ["IPYTHON_DIR"] = old_ipdir2
def test_cli_priority():
    """Command-line options must take priority over config-file values."""
    with TemporaryDirectory() as td:

        class TestApp(BaseIPythonApplication):
            test = Unicode().tag(config=True)

        # Write a config file so the app has a file-based value to load.
        config_path = os.path.join(td, 'ipython_config.py')
        with open(config_path, "w") as cfg:
            cfg.write("c.TestApp.test = 'config file'")

        # Without a CLI override, the config-file value wins.
        app = TestApp()
        app.initialize(['--profile-dir', td])
        nt.assert_equal(app.test, 'config file')

        # An explicit CLI flag overrides the config file.
        app = TestApp()
        app.initialize(['--profile-dir', td, '--TestApp.test=cli'])
        nt.assert_equal(app.test, 'cli')
| {
"content_hash": "0d99601822193102b3adb2ffed4b326e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 76,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6394830659536542,
"repo_name": "Foxfanmedium/python_training",
"id": "45d42727c9b6eb4025e15256526f990c5d0a0a09",
"size": "2264",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "OnlineCoursera/mail_ru/Python_1/env/Lib/site-packages/IPython/core/tests/test_application.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6290"
},
{
"name": "Gherkin",
"bytes": "891"
},
{
"name": "HTML",
"bytes": "6856"
},
{
"name": "Python",
"bytes": "116680"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals, print_function
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
import six
from six.moves import zip, cStringIO
import numpy as np
from functools import partial
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.io import zopen
from monty.dev import requires
from pymatgen.util.coord import in_coord_list_pbc, pbc_diff, \
find_in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.core import Magmom
from pymatgen.core.operations import MagSymmOp
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
warnings.warn("Please install optional dependency pybtex if you"
"want to extract references from CIF files.")
BibliographyData, Entry = None, None
"""
Wrapper classes for Cif input and output from Structures.
"""
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Normalize a space-group symbol by stripping all whitespace and underscores,
# so lookups tolerate the varied spellings found in CIF files.
sub_spgrp = partial(re.sub, r"[\s_]", "")

# Map normalized symbol -> canonical symbol for all known space groups.
space_groups = {sub_spgrp(k): k
                for k in SYMM_DATA['space_group_encoding'].keys()}

# NOTE(review): this update() rebuilds the identical mapping and appears
# redundant -- confirm whether a different key set was intended here.
space_groups.update({sub_spgrp(k): k
                     for k in SYMM_DATA['space_group_encoding'].keys()})

# Lazily-populated cache of COD symmetry-op data (see _get_cod_data).
_COD_DATA = None
def _get_cod_data():
    """Load and cache the COD symmetry-operation table.

    Reads ``symm_ops.json`` from the pymatgen.symmetry package directory on
    first call; subsequent calls return the cached module-level data.
    """
    global _COD_DATA
    if _COD_DATA is not None:
        return _COD_DATA
    # Imports are local so the module loads even when this data is unused.
    import json
    import pymatgen
    json_path = os.path.join(pymatgen.symmetry.__path__[0], "symm_ops.json")
    with open(json_path) as f:
        _COD_DATA = json.load(f)
    return _COD_DATA
class CifBlock(object):
    """A single CIF data block: a dict of data items plus loop groupings,
    with serialization to and parsing from CIF text."""

    maxlen = 70  # not quite 80 so we can deal with semicolons and things

    def __init__(self, data, loops, header):
        """
        Object for storing cif data. All data is stored in a single dictionary.
        Data inside loops are stored in lists in the data dictionary, and
        information on which keys are grouped together are stored in the loops
        attribute.

        Args:
            data: dict or OrderedDict of data to go into the cif. Values should
                be convertible to string, or lists of these if the key is
                in a loop
            loops: list of lists of keys, grouped by which loop they should
                appear in
            header: name of the block (appears after the data_ on the first
                line)
        """
        self.loops = loops
        self.data = data
        # AJ says: CIF Block names cannot be more than 75 characters or you
        # get an Exception
        self.header = header[:74]

    def __eq__(self, other):
        # Equality is full structural equality of loops, data and header.
        return self.loops == other.loops \
            and self.data == other.data \
            and self.header == other.header

    def __getitem__(self, key):
        # Dict-style access delegates to the underlying data dict.
        return self.data[key]

    def __str__(self):
        """
        Returns the cif string for the data block
        """
        s = ["data_{}".format(self.header)]
        keys = self.data.keys()
        # Keys already emitted as part of a loop are not emitted again.
        written = []
        for k in keys:
            if k in written:
                continue
            for l in self.loops:
                # search for a corresponding loop
                if k in l:
                    s.append(self._loop_to_string(l))
                    written.extend(l)
                    break
            if k not in written:
                # k didn't belong to a loop
                v = self._format_field(self.data[k])
                # Key and value share a line only if they fit within maxlen.
                if len(k) + len(v) + 3 < self.maxlen:
                    s.append("{} {}".format(k, v))
                else:
                    s.extend([k, v])
        return "\n".join(s)

    def _loop_to_string(self, loop):
        # Serialize one loop_: header of column keys, then rows of values,
        # wrapping lines at maxlen.
        s = "loop_"
        for l in loop:
            s += '\n ' + l
        for fields in zip(*[self.data[k] for k in loop]):
            line = "\n"
            for val in map(self._format_field, fields):
                if val[0] == ";":
                    # Semicolon-delimited (multi-line) values get their own
                    # lines.
                    s += line + "\n" + val
                    line = "\n"
                elif len(line) + len(val) + 2 < self.maxlen:
                    line += " " + val
                else:
                    s += line
                    line = '\n ' + val
            s += line
        return s

    def _format_field(self, v):
        # Stringify a value, wrapping long values in semicolon delimiters and
        # quoting values that contain spaces or start with an underscore.
        v = v.__str__().strip()
        if len(v) > self.maxlen:
            return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
        # add quotes if necessary
        if v == '':
            return '""'
        if (" " in v or v[0] == "_") \
                and not (v[0] == "'" and v[-1] == "'") \
                and not (v[0] == '"' and v[-1] == '"'):
            # Pick the quote character not already present in the value.
            if "'" in v:
                q = '"'
            else:
                q = "'"
            v = q + v + q
        return v

    @classmethod
    def _process_string(cls, string):
        # Tokenize raw CIF text into a deque of regex-match tuples; multiline
        # (semicolon-delimited) values are rejoined into single tokens.
        # remove comments
        string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
        # remove empty lines
        string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
        # remove non_ascii
        string = remove_non_ascii(string)
        # since line breaks in .cif files are mostly meaningless,
        # break up into a stream of tokens to parse, rejoining multiline
        # strings (between semicolons)
        q = deque()
        multiline = False
        ml = []
        # this regex splits on spaces, except when in quotes.
        # starting quotes must not be preceded by non-whitespace
        # (these get eaten by the first expression)
        # ending quotes must not be followed by non-whitespace
        p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
        for l in string.splitlines():
            if multiline:
                if l.startswith(";"):
                    # Closing semicolon: emit the joined multiline value as a
                    # synthetic 4-tuple token, then reprocess the remainder.
                    multiline = False
                    q.append(('', '', '', ' '.join(ml)))
                    ml = []
                    l = l[1:].strip()
                else:
                    ml.append(l)
                    continue
            if l.startswith(";"):
                multiline = True
                ml.append(l[1:].strip())
            else:
                for s in p.findall(l):
                    # s is tuple. location of the data in the tuple
                    # depends on whether it was quoted in the input
                    q.append(s)
        return q

    @classmethod
    def from_string(cls, string):
        """Parse a single CIF data block from its string representation."""
        q = cls._process_string(string)
        # First token is "data_<header>"; strip the "data_" prefix.
        header = q.popleft()[0][5:]
        data = OrderedDict()
        loops = []
        while q:
            s = q.popleft()
            # cif keys aren't in quotes, so show up in s[0]
            if s[0] == "_eof":
                break
            if s[0].startswith("_"):
                data[s[0]] = "".join(q.popleft())
            elif s[0].startswith("loop_"):
                # Collect the loop's column keys, then its row values.
                columns = []
                items = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or not s[0].startswith("_"):
                        break
                    columns.append("".join(q.popleft()))
                    data[columns[-1]] = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or s[0].startswith("_"):
                        break
                    items.append("".join(q.popleft()))
                n = len(items) // len(columns)
                # NOTE(review): this asserts divisibility by n (the row
                # count); a check against len(columns) may have been
                # intended -- confirm.
                assert len(items) % n == 0
                loops.append(columns)
                # Distribute values row-by-row across the loop columns.
                for k, v in zip(columns * n, items):
                    data[k].append(v.strip())
            elif "".join(s).strip() != "":
                warnings.warn("Possible error in cif format"
                              " error at {}".format("".join(s).strip()))
        return cls(data, loops, header)
class CifFile(object):
    """
    Reads and parses CifBlocks from a .cif file or string
    """

    def __init__(self, data, orig_string=None, comment=None):
        """
        Args:
            data (OrderedDict): Of CifBlock objects.
            orig_string (str): The original cif string.
            comment (str): Comment string.
        """
        self.data = data
        self.orig_string = orig_string
        self.comment = comment or "# generated using pymatgen"

    def __str__(self):
        # Comment line, then each block's CIF text, newline-terminated.
        body = "\n".join("%s" % block for block in self.data.values())
        return self.comment + "\n" + body + "\n"

    @classmethod
    def from_string(cls, string):
        """Parse every data block out of a full CIF string."""
        blocks = OrderedDict()
        # Prefix with a dummy line so the split never starts mid-block.
        chunks = re.split(r"^\s*data_", "x\n" + string,
                          flags=re.MULTILINE | re.DOTALL)
        for chunk in chunks[1:]:
            # Skip over Cif block that contains powder diffraction data.
            # Some elements in this block were missing from CIF files in
            # Springer materials/Pauling file DBs.
            # This block anyway does not contain any structure information, and
            # CifParser was also not parsing it.
            first_line = re.split(r"\n", chunk, 1)[0]
            if 'powder_pattern' in first_line:
                continue
            block = CifBlock.from_string("data_" + chunk)
            blocks[block.header] = block
        return cls(blocks, string)

    @classmethod
    def from_file(cls, filename):
        """Read a CIF file (transparently decompressed) and parse it."""
        with zopen(filename, "rt", errors="replace") as f:
            contents = f.read()
        return cls.from_string(contents)
class CifParser(object):
"""
Parses a cif file
Args:
filename (str): Cif filename. bzipped or gzipped cifs are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
    """
    Args:
        filename (str or file-like): CIF path or an open readable stream.
        occupancy_tolerance (float): Site occupancy sums between 1 and this
            value are later rescaled down to 1.
        site_tolerance (float): Distance tolerance below which two sites
            are treated as the same (disordered) site.
    """
    self._occupancy_tolerance = occupancy_tolerance
    self._site_tolerance = site_tolerance
    # Accept either a path or an already-open stream.
    if isinstance(filename, six.string_types):
        self._cif = CifFile.from_file(filename)
    else:
        self._cif = CifFile.from_string(filename.read())
    # store if CIF contains features from non-core CIF dictionaries
    # e.g. magCIF
    self.feature_flags = {}
    # Accumulates warning/error messages generated during parsing.
    self.errors = []

    def is_magcif():
        """
        Checks to see if file appears to be a magCIF file (heuristic).
        """
        # Doesn't seem to be a canonical way to test if file is magCIF or
        # not, so instead check for magnetic symmetry datanames
        prefixes = ['_space_group_magn', '_atom_site_moment',
                    '_space_group_symop_magn']
        for d in self._cif.data.values():
            for k in d.data.keys():
                for prefix in prefixes:
                    if prefix in k:
                        return True
        return False

    self.feature_flags['magcif'] = is_magcif()

    def is_magcif_incommensurate():
        """
        Checks to see if file contains an incommensurate magnetic
        structure (heuristic).
        """
        # Doesn't seem to be a canonical way to test if magCIF file
        # describes incommensurate strucure or not, so instead check
        # for common datanames
        if not self.feature_flags["magcif"]:
            return False
        prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']
        for d in self._cif.data.values():
            for k in d.data.keys():
                for prefix in prefixes:
                    if prefix in k:
                        return True
        return False

    self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()

    for k in self._cif.data.keys():
        # pass individual CifBlocks to _sanitize_data
        self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
    """
    Build a CifParser directly from a CIF string.

    Args:
        cif_string (str): String representation of a CIF.
        occupancy_tolerance (float): If total occupancy of a site is
            between 1 and occupancy_tolerance, the occupancies will be
            scaled down to 1.

    Returns:
        CifParser
    """
    # Wrap the raw string in a file-like object: the constructor accepts
    # either a filename or a readable stream.
    buf = cStringIO(cif_string)
    return CifParser(buf, occupancy_tolerance)
def _sanitize_data(self, data):
    """
    Some CIF files do not conform to spec. This function corrects
    known issues, particular in regards to Springer materials/
    Pauling files.

    This function is here so that CifParser can assume its
    input conforms to spec, simplifying its implementation.
    :param data: CifBlock
    :return: data CifBlock
    """
    """
    This part of the code deals with handling formats of data as found in
    CIF files extracted from the Springer Materials/Pauling File
    databases, and that are different from standard ICSD formats.
    """
    # Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
    # not contain this key.
    if "_atom_site_type_symbol" in data.data.keys():
        # Keep a track of which data row needs to be removed.
        # Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
        # 'rhombic dodecahedron, Nb<sub>14</sub>'
        # Without this code, the above row in a structure would be parsed
        # as an ordered site with only Nb (since
        # CifParser would try to parse the first two characters of the
        # label "Nb,Zr") and occupancy=1.
        # However, this site is meant to be a disordered site with 0.8 of
        # Nb and 0.2 of Zr.
        idxs_to_remove = []
        # Replacement rows for each expanded disordered site.
        new_atom_site_label = []
        new_atom_site_type_symbol = []
        new_atom_site_occupancy = []
        new_fract_x = []
        new_fract_y = []
        new_fract_z = []
        for idx, el_row in enumerate(data["_atom_site_label"]):
            # CIF files from the Springer Materials/Pauling File have
            # switched the label and symbol. Thus, in the
            # above shown example row, '0.8Nb + 0.2Zr' is the symbol.
            # Below, we split the strings on ' + ' to
            # check if the length (or number of elements) in the label and
            # symbol are equal.
            if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
                    len(data["_atom_site_label"][idx].split(' + ')):
                # Dictionary to hold extracted elements and occupancies
                els_occu = {}
                # parse symbol to get element names and occupancy and store
                # in "els_occu"
                symbol_str = data["_atom_site_type_symbol"][idx]
                symbol_str_lst = symbol_str.split(' + ')
                for elocc_idx in range(len(symbol_str_lst)):
                    # Remove any bracketed items in the string
                    symbol_str_lst[elocc_idx] = re.sub(
                        r'\([0-9]*\)', '',
                        symbol_str_lst[elocc_idx].strip())
                    # Extract element name and its occupancy from the
                    # string, and store it as a
                    # key-value pair in "els_occ".
                    els_occu[str(re.findall(r'\D+', symbol_str_lst[
                        elocc_idx].strip())[1]).replace('<sup>', '')] = \
                        float('0' + re.findall(r'\.?\d+', symbol_str_lst[
                            elocc_idx].strip())[1])
                x = str2float(data["_atom_site_fract_x"][idx])
                y = str2float(data["_atom_site_fract_y"][idx])
                z = str2float(data["_atom_site_fract_z"][idx])
                # Emit one new row per constituent element, all sharing
                # the original row's coordinates.
                for et, occu in els_occu.items():
                    # new atom site labels have 'fix' appended
                    new_atom_site_label.append(
                        et + '_fix' + str(len(new_atom_site_label)))
                    new_atom_site_type_symbol.append(et)
                    new_atom_site_occupancy.append(str(occu))
                    new_fract_x.append(str(x))
                    new_fract_y.append(str(y))
                    new_fract_z.append(str(z))
                idxs_to_remove.append(idx)
        # Remove the original row by iterating over all keys in the CIF
        # data looking for lists, which indicates
        # multiple data items, one for each row, and remove items from the
        # list that corresponds to the removed row,
        # so that it's not processed by the rest of this function (which
        # would result in an error).
        for original_key in data.data:
            if isinstance(data.data[original_key], list):
                # NOTE(review): loop variable `id` shadows the builtin.
                for id in sorted(idxs_to_remove, reverse=True):
                    del data.data[original_key][id]
        if len(idxs_to_remove) > 0:
            # Append the expanded per-element rows.
            data.data["_atom_site_label"] += new_atom_site_label
            data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
            data.data["_atom_site_occupancy"] += new_atom_site_occupancy
            data.data["_atom_site_fract_x"] += new_fract_x
            data.data["_atom_site_fract_y"] += new_fract_y
            data.data["_atom_site_fract_z"] += new_fract_z
    """
    This fixes inconsistencies in naming of several magCIF tags
    as a result of magCIF being in widespread use prior to
    specification being finalized (on advice of Branton Campbell).
    """
    if self.feature_flags["magcif"]:
        # CIF-1 style has all underscores, interim standard
        # had period before magn instead of before the final
        # component (e.g. xyz)
        # we want to standardize on a specific key, to simplify
        # parsing code
        correct_keys = ["_space_group_symop_magn_operation.xyz",
                        "_space_group_symop_magn_centering.xyz",
                        "_space_group_magn.name_BNS",
                        "_space_group_magn.number_BNS",
                        "_atom_site_moment_crystalaxis_x",
                        "_atom_site_moment_crystalaxis_y",
                        "_atom_site_moment_crystalaxis_z",
                        "_atom_site_moment_label"]
        # cannot mutate OrderedDict during enumeration,
        # so store changes we want to make
        changes_to_make = {}
        for original_key in data.data:
            for correct_key in correct_keys:
                # convert to all underscore
                trial_key = "_".join(correct_key.split("."))
                test_key = "_".join(original_key.split("."))
                if trial_key == test_key:
                    changes_to_make[correct_key] = original_key
        # make changes
        for correct_key, original_key in changes_to_make.items():
            data.data[correct_key] = data.data[original_key]
        # renamed_keys maps interim_keys to final_keys
        renamed_keys = {
            "_magnetic_space_group.transform_to_standard_Pp_abc":
                "_space_group_magn.transform_BNS_Pp_abc"}
        changes_to_make = {}
        for interim_key, final_key in renamed_keys.items():
            if data.data.get(interim_key):
                changes_to_make[final_key] = interim_key
        for final_key, interim_key in changes_to_make.items():
            data.data[final_key] = data.data[interim_key]
    return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
    """
    Generate unique coordinates using coord and symmetry positions
    and also their corresponding magnetic moments, if supplied.
    """
    coords = []
    if magmoms_in:
        magmoms = []
        # Magmoms must correspond one-to-one with the input coordinates.
        if len(magmoms_in) != len(coords_in):
            raise ValueError
        for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
            for op in self.symmetry_operations:
                coord = op.operate(tmp_coord)
                # Wrap each fractional coordinate back into [0, 1).
                coord = np.array([i - math.floor(i) for i in coord])
                if isinstance(op, MagSymmOp):
                    # Up to this point, magmoms have been defined relative
                    # to crystal axis. Now convert to Cartesian and into
                    # a Magmom object.
                    magmom = Magmom.from_moment_relative_to_crystal_axes(
                        op.operate_magmom(tmp_magmom),
                        lattice=lattice
                    )
                else:
                    magmom = Magmom(tmp_magmom)
                # Keep only symmetry images not already present within the
                # site tolerance (periodic boundary conditions).
                if not in_coord_list_pbc(coords, coord,
                                         atol=self._site_tolerance):
                    coords.append(coord)
                    magmoms.append(magmom)
        return coords, magmoms
    else:
        for tmp_coord in coords_in:
            for op in self.symmetry_operations:
                coord = op.operate(tmp_coord)
                coord = np.array([i - math.floor(i) for i in coord])
                if not in_coord_list_pbc(coords, coord,
                                         atol=self._site_tolerance):
                    coords.append(coord)
        return coords, [Magmom(0)] * len(coords)  # return dummy magmoms
def get_lattice(self, data, length_strings=("a", "b", "c"),
                angle_strings=("alpha", "beta", "gamma"),
                lattice_type=None):
    """
    Generate the lattice from the provided lattice parameters. In
    the absence of all six lattice parameters, the crystal system
    and necessary parameters are parsed
    """
    try:
        lengths = [str2float(data["_cell_length_" + i])
                   for i in length_strings]
        angles = [str2float(data["_cell_angle_" + i])
                  for i in angle_strings]
        if not lattice_type:
            return Lattice.from_lengths_and_angles(lengths, angles)
        else:
            # Use the named Lattice constructor (e.g. Lattice.cubic).
            return getattr(Lattice, lattice_type)(*(lengths + angles))
    except KeyError:
        # Missing Key search for cell setting
        # NOTE(review): `lattice_lable` is a misspelling of "label";
        # kept as-is since it is a local name.
        for lattice_lable in ["_symmetry_cell_setting",
                              "_space_group_crystal_system"]:
            if data.data.get(lattice_lable):
                lattice_type = data.data.get(lattice_lable).lower()
                try:
                    # Only pass the lengths/angles the named constructor
                    # actually accepts, then retry.
                    required_args = getargspec(
                        getattr(Lattice, lattice_type)).args
                    lengths = (l for l in length_strings
                               if l in required_args)
                    angles = (a for a in angle_strings
                              if a in required_args)
                    return self.get_lattice(data, lengths, angles,
                                            lattice_type=lattice_type)
                except AttributeError as exc:
                    self.errors.append(str(exc))
                    warnings.warn(exc)
            else:
                # NOTE(review): returns None as soon as one label is
                # absent, without checking the remaining labels -- confirm
                # whether falling through to the next label was intended.
                return None
def get_symops(self, data):
    """
    In order to generate symmetry equivalent positions, the symmetry
    operations are parsed. If the symops are not present, the space
    group symbol is parsed, and symops are generated.
    """
    symops = []
    # 1) Preferred source: explicit symmetry operations listed in the CIF.
    for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
                           "_symmetry_equiv_pos_as_xyz_",
                           "_space_group_symop_operation_xyz",
                           "_space_group_symop_operation_xyz_"]:
        if data.data.get(symmetry_label):
            xyz = data.data.get(symmetry_label)
            if isinstance(xyz, six.string_types):
                msg = "A 1-line symmetry op P1 CIF is detected!"
                warnings.warn(msg)
                self.errors.append(msg)
                xyz = [xyz]
            try:
                symops = [SymmOp.from_xyz_string(s)
                          for s in xyz]
                break
            except ValueError:
                continue
    if not symops:
        # 2) Fall back to the space-group symbol (H-M or Hall variants).
        # Try to parse symbol
        for symmetry_label in ["_symmetry_space_group_name_H-M",
                               "_symmetry_space_group_name_H_M",
                               "_symmetry_space_group_name_H-M_",
                               "_symmetry_space_group_name_H_M_",
                               "_space_group_name_Hall",
                               "_space_group_name_Hall_",
                               "_space_group_name_H-M_alt",
                               "_space_group_name_H-M_alt_",
                               "_symmetry_space_group_name_hall",
                               "_symmetry_space_group_name_hall_",
                               "_symmetry_space_group_name_h-m",
                               "_symmetry_space_group_name_h-m_"]:
            sg = data.data.get(symmetry_label)
            if sg:
                # Normalize the symbol before lookup.
                sg = sub_spgrp(sg)
                try:
                    spg = space_groups.get(sg)
                    if spg:
                        symops = SpaceGroup(spg).symmetry_ops
                        msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
                              "Spacegroup from %s used." % symmetry_label
                        warnings.warn(msg)
                        self.errors.append(msg)
                        break
                except ValueError:
                    # Ignore any errors
                    pass
                # 2b) Also try matching against the COD reference table.
                try:
                    for d in _get_cod_data():
                        if sg == re.sub(r"\s+", "",
                                        d["hermann_mauguin"]):
                            xyz = d["symops"]
                            symops = [SymmOp.from_xyz_string(s)
                                      for s in xyz]
                            msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
                                  "Spacegroup from %s used." % symmetry_label
                            warnings.warn(msg)
                            self.errors.append(msg)
                            break
                except Exception as ex:
                    continue
                if symops:
                    break
    if not symops:
        # 3) Fall back to the International Tables number.
        # Try to parse International number
        for symmetry_label in ["_space_group_IT_number",
                               "_space_group_IT_number_",
                               "_symmetry_Int_Tables_number",
                               "_symmetry_Int_Tables_number_"]:
            if data.data.get(symmetry_label):
                try:
                    i = int(str2float(data.data.get(symmetry_label)))
                    symops = SpaceGroup.from_int_number(i).symmetry_ops
                    break
                except ValueError:
                    continue
    if not symops:
        # 4) Last resort: assume P1 (identity operation only).
        msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
              "Defaulting to P1."
        warnings.warn(msg)
        self.errors.append(msg)
        symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]
    return symops
def get_magsymops(self, data):
    """
    Equivalent to get_symops except for magnetic symmetry groups.
    Separate function since additional operation for time reversal symmetry
    (which changes magnetic moments on sites) needs to be returned.
    """
    magsymmops = []
    # check to see if magCIF file explicitly contains magnetic symmetry operations
    if data.data.get("_space_group_symop_magn_operation.xyz"):
        xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
        if isinstance(xyzt, six.string_types):
            xyzt = [xyzt]
        magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
        if data.data.get("_space_group_symop_magn_centering.xyz"):
            # Combine every operation with every centering operation.
            xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
            if isinstance(xyzt, six.string_types):
                xyzt = [xyzt]
            centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
            all_ops = []
            for op in magsymmops:
                for centering_op in centering_symops:
                    # Wrap combined translations back into the unit cell.
                    new_translation = [i - np.floor(i) for i
                                       in
                                       op.translation_vector + centering_op.translation_vector]
                    new_time_reversal = op.time_reversal * centering_op.time_reversal
                    all_ops.append(
                        MagSymmOp.from_rotation_and_translation_and_time_reversal(
                            rotation_matrix=op.rotation_matrix,
                            translation_vec=new_translation,
                            time_reversal=new_time_reversal))
            magsymmops = all_ops
    # else check to see if it specifies a magnetic space group
    elif data.data.get("_space_group_magn.name_BNS") or data.data.get(
            "_space_group_magn.number_BNS"):
        if data.data.get("_space_group_magn.name_BNS"):
            # get BNS label for MagneticSpaceGroup()
            id = data.data.get("_space_group_magn.name_BNS")
        else:
            # get BNS number for MagneticSpaceGroup()
            # by converting string to list of ints
            id = list(map(int, (
                data.data.get("_space_group_magn.number_BNS").split("."))))
        msg = MagneticSpaceGroup(id)
        if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
            if data.data.get(
                    "_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
                # BUG FIX: these exceptions were previously *returned*
                # instead of raised, so callers silently received an
                # exception instance in place of a list of operations.
                raise NotImplementedError(
                    "Non-standard settings not currently supported.")
        elif data.data.get("_space_group_magn.transform_BNS_Pp"):
            raise NotImplementedError(
                "Incomplete specification to implement.")
        magsymmops = msg.symmetry_ops
    if not magsymmops:
        # Fall back to the identity operation (no time reversal).
        msg = "No magnetic symmetry detected, using primitive symmetry."
        warnings.warn(msg)
        self.errors.append(msg)
        magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
    return magsymmops
def parse_oxi_states(self, data):
    """
    Parse oxidation states from data dictionary

    Returns a dict mapping atom-type symbol (and the symbol stripped of
    any trailing oxidation-state suffix) to its oxidation number, or None
    if the relevant keys are missing or unparseable.
    """
    try:
        symbols = data["_atom_type_symbol"]
        numbers = data["_atom_type_oxidation_number"]
        oxi_states = {symbols[i]: str2float(numbers[i])
                      for i in range(len(symbols))}
        # attempt to strip oxidation state from _atom_type_symbol
        # in case the label does not contain an oxidation state
        for i in range(len(symbols)):
            stripped = re.sub(r"\d?[\+,\-]?$", "", symbols[i])
            oxi_states[stripped] = str2float(numbers[i])
    except (ValueError, KeyError):
        return None
    return oxi_states
def parse_magmoms(self, data, lattice=None):
    """
    Parse atomic magnetic moments from data dictionary

    Returns a dict mapping moment label to a length-3 numpy array of
    crystal-axis components, or None if the keys are missing/unparseable.
    """
    if lattice is None:
        raise Exception(
            'Magmoms given in terms of crystal axes in magCIF spec.')
    magmoms = {}
    try:
        for i in range(len(data["_atom_site_moment_label"])):
            label = data["_atom_site_moment_label"][i]
            moment = np.array(
                [str2float(data["_atom_site_moment_crystalaxis_x"][i]),
                 str2float(data["_atom_site_moment_crystalaxis_y"][i]),
                 str2float(data["_atom_site_moment_crystalaxis_z"][i])])
            magmoms[label] = moment
    except (ValueError, KeyError):
        return None
    return magmoms
def _parse_symbol(self, sym):
    """
    Parse a string with a symbol to extract a string representing an element.

    Args:
        sym (str): A symbol to be parsed.

    Returns:
        A string with the parsed symbol. None if no parsing was possible.
    """
    # Common representations for elements/water in cif files
    # TODO: fix inconsistent handling of water
    special = {"Hw": "H", "Ow": "O", "Wat": "O",
               "wat": "O", "OH": "", "OH2": "", "NO3": "N"}

    parsed_sym = None
    # try with special symbols, otherwise check the first two letters,
    # then the first letter alone. If everything fails try extracting the first letters.
    m_sp = re.match("|".join(special.keys()), sym)
    if m_sp:
        parsed_sym = special[m_sp.group()]
    elif Element.is_valid_symbol(sym[:2].title()):
        parsed_sym = sym[:2].title()
    elif Element.is_valid_symbol(sym[0].upper()):
        parsed_sym = sym[0].upper()
    else:
        m = re.match(r"w?[A-Z][a-z]*", sym)
        if m:
            parsed_sym = m.group()

    # BUG FIX: the pattern was a non-raw string ("{}\d*") whose "\d" is an
    # invalid escape sequence (DeprecationWarning, and SyntaxWarning on
    # recent Pythons). A raw string keeps the regex identical and silent.
    if parsed_sym is not None and \
            (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
        msg = "{} parsed as {}".format(sym, parsed_sym)
        warnings.warn(msg)
        self.errors.append(msg)

    return parsed_sym
def _get_structure(self, data, primitive):
    """
    Generate structure from part of the cif.
    """

    def get_num_implicit_hydrogens(sym):
        # Implicit hydrogen counts for water/hydroxyl pseudo-symbols.
        num_h = {"Wat": 2, "wat": 2, "O-H": 1}
        return num_h.get(sym[:3], 0)

    lattice = self.get_lattice(data)

    # if magCIF, get magnetic symmetry moments and magmoms
    # else standard CIF, and use empty magmom dict
    if self.feature_flags["magcif_incommensurate"]:
        raise NotImplementedError(
            "Incommensurate structures not currently supported.")
    elif self.feature_flags["magcif"]:
        self.symmetry_operations = self.get_magsymops(data)
        magmoms = self.parse_magmoms(data, lattice=lattice)
    else:
        self.symmetry_operations = self.get_symops(data)
        magmoms = {}

    oxi_states = self.parse_oxi_states(data)
    coord_to_species = OrderedDict()
    coord_to_magmoms = OrderedDict()

    def get_matching_coord(coord):
        # Return an existing coordinate key equivalent (under symmetry and
        # PBC, within tolerance) to coord, or False if none matches.
        keys = list(coord_to_species.keys())
        coords = np.array(keys)
        for op in self.symmetry_operations:
            c = op.operate(coord)
            inds = find_in_coord_list_pbc(coords, c,
                                          atol=self._site_tolerance)
            # cant use if inds, because python is dumb and np.array([0]) evaluates
            # to False
            if len(inds):
                return keys[inds[0]]
        return False

    for i in range(len(data["_atom_site_label"])):
        try:
            # If site type symbol exists, use it. Otherwise, we use the
            # label.
            symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
            num_h = get_num_implicit_hydrogens(
                data["_atom_site_type_symbol"][i])
        except KeyError:
            symbol = self._parse_symbol(data["_atom_site_label"][i])
            num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
        if not symbol:
            continue
        if oxi_states is not None:
            o_s = oxi_states.get(symbol, 0)
            # use _atom_site_type_symbol if possible for oxidation state
            if "_atom_site_type_symbol" in data.data.keys():
                oxi_symbol = data["_atom_site_type_symbol"][i]
                o_s = oxi_states.get(oxi_symbol, o_s)
            try:
                el = Specie(symbol, o_s)
            except:
                # NOTE(review): bare except -- any failure falls back to
                # DummySpecie, not just invalid-element errors.
                el = DummySpecie(symbol, o_s)
        else:
            el = get_el_sp(symbol)
        x = str2float(data["_atom_site_fract_x"][i])
        y = str2float(data["_atom_site_fract_y"][i])
        z = str2float(data["_atom_site_fract_z"][i])
        magmom = magmoms.get(data["_atom_site_label"][i],
                             np.array([0, 0, 0]))
        try:
            occu = str2float(data["_atom_site_occupancy"][i])
        except (KeyError, ValueError):
            # Missing or unparseable occupancy defaults to full occupancy.
            occu = 1
        if occu > 0:
            coord = (x, y, z)
            match = get_matching_coord(coord)
            comp_d = {el: occu}
            if num_h > 0:
                comp_d["H"] = num_h
            comp = Composition(comp_d)
            if not match:
                coord_to_species[coord] = comp
                coord_to_magmoms[coord] = magmom
            else:
                # Merge occupancy onto the symmetry-equivalent site.
                coord_to_species[match] += comp
                # disordered magnetic not currently supported
                coord_to_magmoms[match] = None

    # O/H-only sites are excluded from the occupancy check since implicit
    # hydrogens can push their nominal sums over 1.
    sum_occu = [sum(c.values()) for c in coord_to_species.values()
                if not set(c.elements) == {Element("O"), Element("H")}]
    if any([o > 1 for o in sum_occu]):
        msg = "Some occupancies (%s) sum to > 1! If they are within " \
              "the tolerance, they will be rescaled." % str(sum_occu)
        warnings.warn(msg)
        self.errors.append(msg)

    allspecies = []
    allcoords = []
    allmagmoms = []
    allhydrogens = []

    # check to see if magCIF file is disordered
    if self.feature_flags["magcif"]:
        for k, v in coord_to_magmoms.items():
            if v is None:
                # Proposed solution to this is to instead store magnetic
                # moments as Specie 'spin' property, instead of site
                # property, but this introduces ambiguities for end user
                # (such as unintended use of `spin` and Specie will have
                # fictious oxidation state).
                raise NotImplementedError(
                    'Disordered magnetic structures not currently supported.')

    if coord_to_species.items():
        # Group sites with identical composition, then expand each group
        # to all unique symmetry-equivalent coordinates.
        for comp, group in groupby(
                sorted(list(coord_to_species.items()), key=lambda x: x[1]),
                key=lambda x: x[1]):
            tmp_coords = [site[0] for site in group]
            tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in
                          tmp_coords]
            if self.feature_flags["magcif"]:
                coords, magmoms = self._unique_coords(tmp_coords,
                                                      magmoms_in=tmp_magmom,
                                                      lattice=lattice)
            else:
                coords, magmoms = self._unique_coords(tmp_coords)
            if set(comp.elements) == {Element("O"), Element("H")}:
                # O with implicit hydrogens
                im_h = comp["H"]
                species = Composition({"O": comp["O"]})
            else:
                im_h = 0
                species = comp
            allhydrogens.extend(len(coords) * [im_h])
            allcoords.extend(coords)
            allspecies.extend(len(coords) * [species])
            allmagmoms.extend(magmoms)

    # rescale occupancies if necessary
    for i, species in enumerate(allspecies):
        totaloccu = sum(species.values())
        if 1 < totaloccu <= self._occupancy_tolerance:
            allspecies[i] = species / totaloccu

    if allspecies and len(allspecies) == len(allcoords) \
            and len(allspecies) == len(allmagmoms):
        site_properties = dict()
        if any(allhydrogens):
            assert len(allhydrogens) == len(allcoords)
            site_properties["implicit_hydrogens"] = allhydrogens
        if self.feature_flags["magcif"]:
            site_properties["magmom"] = allmagmoms
        if len(site_properties) == 0:
            site_properties = None
        struct = Structure(lattice, allspecies, allcoords,
                           site_properties=site_properties)
        struct = struct.get_sorted_structure()
        if primitive and self.feature_flags['magcif']:
            struct = struct.get_primitive_structure(use_site_props=True)
        elif primitive:
            struct = struct.get_primitive_structure()
            struct = struct.get_reduced_structure()
        return struct
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures.
"""
structures = []
for d in self._cif.data.values():
try:
s = self._get_structure(d, primitive)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.errors.append(str(exc))
warnings.warn(str(exc))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
    @requires(BibliographyData, "Bibliographic data extraction requires pybtex.")
    def get_bibtex_string(self):
        """
        Get BibTeX reference from CIF file.

        Builds one ``article`` entry per CIF data block, keyed as
        ``cif-reference-<idx>`` in the order the blocks are found.

        :return: BibTeX string
        """
        # Map each BibTeX field to the candidate CIF tags that may carry it;
        # when several tags are present, the last one in the tuple wins.
        bibtex_keys = {'author': ('_publ_author_name', '_citation_author_name'),
                       'title': ('_publ_section_title', '_citation_title'),
                       'journal': ('_journal_name_full', '_journal_name_abbrev',
                                   '_citation_journal_full', '_citation_journal_abbrev'),
                       'volume': ('_journal_volume', '_citation_journal_volume'),
                       'year': ('_journal_year', '_citation_year'),
                       'number': ('_journal_number', '_citation_number'),
                       'page_first': ('_journal_page_first', '_citation_page_first'),
                       'page_last': ('_journal_page_last', '_citation_page_last'),
                       'doi': ('_journal_DOI', '_citation_DOI')}
        entries = {}
        # TODO: parse '_publ_section_references' when it exists?
        # TODO: CIF specification supports multiple citations.
        for idx, data in enumerate(self._cif.data.values()):
            # convert to lower-case keys, some cif files inconsistent
            data = {k.lower(): v for k, v in data.data.items()}
            bibtex_entry = {}
            for field, tags in bibtex_keys.items():
                for tag in tags:
                    if tag in data:
                        bibtex_entry[field] = data[tag]
            # convert to bibtex author format ('and' delimited)
            # assumes the CIF author tag holds an iterable of names — TODO confirm
            if 'author' in bibtex_entry:
                bibtex_entry['author'] = ' and '.join(bibtex_entry['author'])
            # convert to bibtex page range format, use empty string if not specified
            if ('page_first' in bibtex_entry) or ('page_last' in bibtex_entry):
                bibtex_entry['pages'] = '{0}--{1}'.format(bibtex_entry.get('page_first', ''),
                                                          bibtex_entry.get('page_last', ''))
                bibtex_entry.pop('page_first', None)  # and remove page_first, page_last if present
                bibtex_entry.pop('page_last', None)
            # cite keys are given as cif-reference-idx in order they are found
            entries['cif-reference-{}'.format(idx)] = Entry('article', list(bibtex_entry.items()))
        return BibliographyData(entries).to_string(bib_format='bibtex')
def as_dict(self):
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
    @property
    def has_errors(self):
        """bool: Whether any errors were recorded while parsing."""
        return len(self.errors) > 0
class CifWriter(object):
    """Writes a pymatgen Structure out as a CIF (optionally magCIF) file."""

    def __init__(self, struct, symprec=None, write_magmoms=False):
        """
        A wrapper around CifFile to write CIF files from pymatgen structures.

        Args:
            struct (Structure): structure to write
            symprec (float): If not none, finds the symmetry of the structure
                and writes the cif with symmetry information. Passes symprec
                to the SpacegroupAnalyzer
            write_magmoms (bool): If True, will write magCIF file. Incompatible
                with symprec
        """
        # magCIF output and symmetry detection are mutually exclusive.
        if write_magmoms and symprec:
            warnings.warn(
                "Magnetic symmetry cannot currently be detected by pymatgen,"
                "disabling symmetry detection.")
            symprec = None
        format_str = "{:.8f}"
        block = OrderedDict()
        loops = []
        # Default to P1 when no symmetry analysis is requested.
        spacegroup = ("P 1", 1)
        if symprec is not None:
            sf = SpacegroupAnalyzer(struct, symprec)
            spacegroup = (sf.get_space_group_symbol(),
                          sf.get_space_group_number())
            # Needs the refined structure when using symprec. This converts
            # primitive to conventional structures, the standard for CIF.
            struct = sf.get_refined_structure()
        latt = struct.lattice
        comp = struct.composition
        no_oxi_comp = comp.element_composition
        # --- lattice / cell metadata ---
        block["_symmetry_space_group_name_H-M"] = spacegroup[0]
        for cell_attr in ['a', 'b', 'c']:
            block["_cell_length_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        for cell_attr in ['alpha', 'beta', 'gamma']:
            block["_cell_angle_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        block["_symmetry_Int_Tables_number"] = spacegroup[1]
        block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
        block["_chemical_formula_sum"] = no_oxi_comp.formula
        block["_cell_volume"] = "%.8f" % latt.volume
        reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
        block["_cell_formula_units_Z"] = str(int(fu))
        # --- symmetry operations ---
        if symprec is None:
            # No symmetry: identity operation only.
            block["_symmetry_equiv_pos_site_id"] = ["1"]
            block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
        else:
            sf = SpacegroupAnalyzer(struct, symprec)
            symmops = []
            for op in sf.get_symmetry_operations():
                v = op.translation_vector
                symmops.append(SymmOp.from_rotation_and_translation(
                    op.rotation_matrix, v))
            ops = [op.as_xyz_string() for op in symmops]
            block["_symmetry_equiv_pos_site_id"] = \
                ["%d" % i for i in range(1, len(ops) + 1)]
            block["_symmetry_equiv_pos_as_xyz"] = ops
        loops.append(["_symmetry_equiv_pos_site_id",
                      "_symmetry_equiv_pos_as_xyz"])
        # --- oxidation states (only when every element carries one) ---
        contains_oxidation = True
        try:
            symbol_to_oxinum = OrderedDict([
                (el.__str__(),
                 float(el.oxi_state))
                for el in sorted(comp.elements)])
        except AttributeError:
            # Plain Elements have no oxi_state attribute; fall back to 0.
            symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
                                            sorted(comp.elements)])
            contains_oxidation = False
        if contains_oxidation:
            block["_atom_type_symbol"] = symbol_to_oxinum.keys()
            block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
            loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
        # --- per-site data ---
        atom_site_type_symbol = []
        atom_site_symmetry_multiplicity = []
        atom_site_fract_x = []
        atom_site_fract_y = []
        atom_site_fract_z = []
        atom_site_label = []
        atom_site_occupancy = []
        atom_site_moment_label = []
        atom_site_moment_crystalaxis_x = []
        atom_site_moment_crystalaxis_y = []
        atom_site_moment_crystalaxis_z = []
        count = 1
        if symprec is None:
            # Write every site explicitly (P1).
            for site in struct:
                for sp, occu in sorted(site.species_and_occu.items()):
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("1")
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    # Moment comes from the site property, else the species'
                    # spin, else zero.
                    magmom = Magmom(
                        site.properties.get('magmom', getattr(sp, 'spin', 0)))
                    if write_magmoms and abs(magmom) > 0:
                        moment = Magmom.get_moment_relative_to_crystal_axes(
                            magmom, latt)
                        atom_site_moment_label.append(
                            "{}{}".format(sp.symbol, count))
                        atom_site_moment_crystalaxis_x.append("%.5f" % moment[0])
                        atom_site_moment_crystalaxis_y.append("%.5f" % moment[1])
                        atom_site_moment_crystalaxis_z.append("%.5f" % moment[2])
                    count += 1
        else:
            # The following just presents a deterministic ordering.
            unique_sites = [
                (sorted(sites, key=lambda s: tuple([abs(x) for x in
                                                    s.frac_coords]))[0],
                 len(sites))
                for sites in sf.get_symmetrized_structure().equivalent_sites
            ]
            for site, mult in sorted(
                    unique_sites,
                    key=lambda t: (t[0].species_and_occu.average_electroneg,
                                   -t[1], t[0].a, t[0].b, t[0].c)):
                for sp, occu in site.species_and_occu.items():
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("%d" % mult)
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    count += 1
        block["_atom_site_type_symbol"] = atom_site_type_symbol
        block["_atom_site_label"] = atom_site_label
        block["_atom_site_symmetry_multiplicity"] = \
            atom_site_symmetry_multiplicity
        block["_atom_site_fract_x"] = atom_site_fract_x
        block["_atom_site_fract_y"] = atom_site_fract_y
        block["_atom_site_fract_z"] = atom_site_fract_z
        block["_atom_site_occupancy"] = atom_site_occupancy
        loops.append(["_atom_site_type_symbol",
                      "_atom_site_label",
                      "_atom_site_symmetry_multiplicity",
                      "_atom_site_fract_x",
                      "_atom_site_fract_y",
                      "_atom_site_fract_z",
                      "_atom_site_occupancy"])
        if write_magmoms:
            block["_atom_site_moment_label"] = atom_site_moment_label
            block[
                "_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
            block[
                "_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
            block[
                "_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
            loops.append(["_atom_site_moment_label",
                          "_atom_site_moment_crystalaxis_x",
                          "_atom_site_moment_crystalaxis_y",
                          "_atom_site_moment_crystalaxis_z"])
        d = OrderedDict()
        d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
        self._cf = CifFile(d)

    def __str__(self):
        """
        Returns the cif as a string.
        """
        return self._cf.__str__()

    def write_file(self, filename):
        """
        Write the cif file.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
def str2float(text):
    """
    Remove uncertainty brackets from strings and return the float.

    CIF numeric values may carry an uncertainty suffix, e.g. ``"0.88(1)"``;
    the parenthesized part (whose closing ``)`` is sometimes missing) is
    stripped before conversion. A single-element list is unwrapped, and a
    bare ``"."`` is treated as zero.

    Args:
        text (str or list): Numeric CIF value, or a one-element list of one.

    Returns:
        float: The parsed value.

    Raises:
        ValueError: If the value cannot be interpreted as a float.
    """
    try:
        # Note that the ending ) is sometimes missing. That is why the code
        # treats it as optional.
        return float(re.sub(r"\(.+\)*", "", text))
    except TypeError:
        if isinstance(text, list) and len(text) == 1:
            # Recurse so single-element lists also get the "." handling.
            return str2float(text[0])
        # BUGFIX: previously fell through and silently returned None for
        # any other non-string input; now fail loudly.
        raise ValueError("Could not parse {!r} as a float".format(text))
    except ValueError as ex:
        if text.strip() == ".":
            return 0
        raise ex
| {
"content_hash": "c30aa333c309b9308956b98cf1320061",
"timestamp": "",
"source": "github",
"line_count": 1339,
"max_line_length": 99,
"avg_line_length": 41.42046303211352,
"alnum_prop": 0.5055713822076376,
"repo_name": "nisse3000/pymatgen",
"id": "0c2790580d8c3251f380aedace0d0852f57b4a5b",
"size": "55573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/cif.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6934548"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
import pysftp

# NOTE(review): hard-coded host/credentials in source control are a security
# risk -- move them to a config file or environment variables.
with pysftp.Connection('192.168.8.1', username='root', password='843ea28d5f') as sftp:
    # Parenthesized print is valid under both Python 2 and Python 3.
    print(sftp.pwd)
    with sftp.cd("/tmp/mnt/mmcblk0p1/"):
        print(sftp.pwd)
        # Print every remote entry with its attributes (mode, size, mtime).
        for entry in sftp.listdir_attr():
            print(unicode(entry))
# with sftp.cd('public'):             # temporarily chdir to public
#     sftp.put('/my/local/filename')  # upload file to public/ on remote
#
# sftp.get_r('myfiles', '/backup')    # recursively copy myfiles/ to local
| {
"content_hash": "5a01e1aabb44343101d158dabf7c4f5a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 40.583333333333336,
"alnum_prop": 0.6098562628336756,
"repo_name": "weijia/django-local-apps",
"id": "b016c38309a1a2843c76a0c52a2b8438cfe89e81",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_local_apps/management/commands/local_app_utils/sftp_uploader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2703"
},
{
"name": "Makefile",
"bytes": "1268"
},
{
"name": "Python",
"bytes": "49059"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from decimal import Decimal
from io import open
from builtins import str as text
import pytest
from satcfe.excecoes import ErroRespostaSATInvalida
from satcfe.excecoes import ExcecaoRespostaSAT
from satcfe.resposta import RespostaEnviarDadosVenda
from satcfe.util import as_datetime
from satcfe.util import str_to_base64
def test_respostas_de_sucesso(datadir):
    """A successful response must expose all parsed CF-e attributes."""
    arquivo_sucesso = text(datadir.join('respostas-de-sucesso.txt'))
    arquivo_cfesat = text(datadir.join('cfe-autorizado.xml'))
    with open(arquivo_sucesso, 'r', encoding='utf-8') as fresp, \
            open(arquivo_cfesat, 'r', encoding='utf-8') as fxml:
        r_sucessos = fresp.read().splitlines()
        cfe_autorizado = fxml.read()
    # Pull the expected access key and QRCode signature straight from the
    # XML fixture so the assertions below stay in sync with it.
    _CFe = ET.fromstring(cfe_autorizado)
    _infCFe = _CFe.find('./infCFe')
    chave_consulta = _infCFe.attrib['Id']
    assinatura_qrcode = _infCFe.findtext('./ide/assinaturaQRCODE')
    resposta = RespostaEnviarDadosVenda.analisar(r_sucessos[0])
    assert resposta.numeroSessao == 123456
    assert resposta.EEEEE == '06000'
    assert resposta.CCCC == '0000'
    assert resposta.arquivoCFeSAT == str_to_base64(cfe_autorizado)
    assert resposta.timeStamp == as_datetime('20150718154423')
    assert resposta.chaveConsulta == chave_consulta
    assert resposta.valorTotalCFe == Decimal('2.00')
    assert resposta.assinaturaQRCODE == assinatura_qrcode
    # Obviously this is not the best way to compare XML strings, but it is
    # all that is needed: the XML content just has to be returned by the
    # xml() method for everything to be OK.
    assert resposta.xml()[:28] == '<?xml version="1.0"?>\n<CFe>\n'
    # Same thing here: the qrcode() method only needs to produce something,
    # since the focus is not testing the generation of the QRCode payload.
    assert resposta.qrcode()[:9] == '351507087'
def test_respostas_de_falha(datadir):
    """Failure responses must raise ExcecaoRespostaSAT and block xml()/qrcode()."""
    arquivo = text(datadir.join('respostas-de-falha.txt'))
    with open(arquivo, 'r', encoding='utf-8') as f:
        respostas = f.read().splitlines()
    for retorno in respostas:
        with pytest.raises(ExcecaoRespostaSAT) as exsat:
            RespostaEnviarDadosVenda.analisar(retorno)
        assert hasattr(exsat.value, 'resposta')
        resposta = exsat.value.resposta
        with pytest.raises(ExcecaoRespostaSAT):
            # when the response is not a success, xml() must fail
            resposta.xml()
        with pytest.raises(ExcecaoRespostaSAT):
            # when the response is not a success, qrcode() must fail
            resposta.qrcode()
def test_respostas_invalidas(datadir):
    """Malformed return strings must raise ErroRespostaSATInvalida."""
    caminho = text(datadir.join('respostas-invalidas.txt'))
    with open(caminho, 'r', encoding='utf-8') as arquivo:
        linhas = arquivo.read().splitlines()
    for linha in linhas:
        with pytest.raises(ErroRespostaSATInvalida):
            RespostaEnviarDadosVenda.analisar(linha)
@pytest.mark.acessa_sat
@pytest.mark.invoca_enviardadosvenda
def test_funcao_enviardadosvenda(clientesatlocal, cfevenda):
    """Happy-path round trip against a local SAT client (integration test)."""
    resposta = clientesatlocal.enviar_dados_venda(cfevenda)
    assert resposta.EEEEE == '06000'
    assert resposta.valorTotalCFe == Decimal('4.73')
    # access key: 'CFe' prefix followed by 44 characters
    assert len(resposta.chaveConsulta) == 47
    assert resposta.chaveConsulta.startswith('CFe')
@pytest.mark.acessa_sat
@pytest.mark.invoca_enviardadosvenda
def test_emite_warning_argumentos_extras_ignorados(
        clientesatlocal,
        cfevenda):
    """Extra positional/keyword arguments must trigger exactly one UserWarning."""
    conteudo_cfe = cfevenda.documento()  # resolve the document (obtaining a str)
    with pytest.warns(UserWarning) as rec:
        resposta = clientesatlocal.enviar_dados_venda(
                conteudo_cfe,
                'argumentos',
                'extras',
                'informados',
                argumentos=1,
                extras=2,
                informados=3)
    assert len(rec) == 1
    assert rec[0].message.args[0].startswith('O documento foi informado')
    assert resposta.EEEEE == '06000'
@pytest.mark.acessa_sat
@pytest.mark.invoca_enviardadosvenda
def test_argumento_nao_str_sem_metodo_documento(clientesatlocal):
    """Non-str sale data without a documento() method must be rejected."""
    # if the dados_venda argument is not a str, then it must be an object
    # exposing a method named "documento()" able to generate the sale CF-e
    # that will be sent to the SAT device
    class _Quack(object):
        pass
    with pytest.raises(ValueError):
        clientesatlocal.enviar_dados_venda(_Quack())
| {
"content_hash": "de91ba78411afefa90b42024c1277939",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 78,
"avg_line_length": 36.07142857142857,
"alnum_prop": 0.6906490649064907,
"repo_name": "base4sistemas/satcfe",
"id": "955ffc7ad2fd51993e97bc1b74a933d536c6eff8",
"size": "5215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_enviardadosvenda.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14820"
},
{
"name": "Makefile",
"bytes": "2932"
},
{
"name": "Python",
"bytes": "346214"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
import pyxb_114
import normal as custom
import raw.custom as raw_custom
import unittest
class TestComplex (unittest.TestCase):
    """Exercises the raw vs. customized (extended) PyXB binding hierarchies."""

    def setUp (self):
        # Python 2 'file' builtin: parse the sample instance document once
        # per test.
        xmls = file('test.xml').read()
        self.instance = custom.CreateFromDocument(xmls)

    def testRawSubclassHierarchy (self):
        # The raw bindings mirror the schema-declared type hierarchy.
        self.assertTrue(issubclass(raw_custom.tc01, raw_custom.ta0))
        self.assertTrue(issubclass(raw_custom.tc02, raw_custom.ta0))
        self.assertTrue(issubclass(raw_custom.tc03, raw_custom.ta0))
        self.assertTrue(issubclass(raw_custom.ta04, raw_custom.ta0))
        self.assertTrue(issubclass(raw_custom.tc041, raw_custom.ta04))
        self.assertTrue(issubclass(raw_custom.tc042, raw_custom.ta04))

    def testCustomSubclassesRaw(self):
        # Every customized class derives from its raw counterpart.
        self.assertTrue(issubclass(custom.tc01, raw_custom.tc01))
        self.assertTrue(issubclass(custom.tc02, raw_custom.tc02))
        self.assertTrue(issubclass(custom.tc03, raw_custom.tc03))
        self.assertTrue(issubclass(custom.ta04, raw_custom.ta04))
        self.assertTrue(issubclass(custom.tc041, raw_custom.tc041))
        self.assertTrue(issubclass(custom.tc042, raw_custom.tc042))

    def testCustomConcreteHierarchy(self):
        # Only tc02 keeps the customized ta0 as a base.
        self.assertFalse(issubclass(custom.tc01, custom.ta0))
        self.assertTrue(issubclass(custom.tc02, custom.ta0))
        self.assertFalse(issubclass(custom.tc03, custom.ta0))

    def test_c01 (self):
        ec01 = self.instance.ec01
        self.assertTrue(isinstance(ec01, custom.tc01))
        self.assertEqual(ec01.ea0, 'ec01')
        self.assertEqual(ec01.ec01, 'c01')
        # Direct customization works...
        self.assertEqual(ec01.xc01(), 'extend tc01')
        # No inheritance from customized superclass
        self.assertRaises(AttributeError, lambda _i: _i.xa0, ec01)

    def test_c02 (self):
        # Dual-inheritance customization works
        ec02 = self.instance.ec02
        self.assertTrue(isinstance(ec02, custom.tc02))
        self.assertEqual(ec02.ea0, 'ec02')
        self.assertEqual(ec02.ec02_i, 2)
        # Direct customization works
        self.assertEqual(ec02.xc02(), 'extend tc02')
        # Inherited customization works
        self.assertEqual(ec02.xa0(), 'extend ta0')

    def test_c03 (self):
        ec03 = self.instance.ec03
        self.assertTrue(isinstance(ec03, custom.tc03))
        self.assertEqual(ec03.ea0, 'ec03')
        self.assertTrue(ec03.ec03_b)
        # No inheritance from customized superclass
        self.assertRaises(AttributeError, lambda _i: _i.xa0, ec03)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "99969f905738d27f9557e565bfe45f68",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 70,
"avg_line_length": 39.738461538461536,
"alnum_prop": 0.6755710414246999,
"repo_name": "msherry/PyXB-1.1.4",
"id": "9beebbc50937ae64c4fe8117f13c4c22566f335f",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/customization/tst-normal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
} |
from builtins import range
import numpy as np
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    """
    Compute the (k, i, j) fancy-index arrays used by im2col/col2im.

    Args:
        x_shape: Input shape (N, C, H, W).
        field_height: Receptive-field (kernel) height.
        field_width: Receptive-field (kernel) width.
        padding: Zero padding applied symmetrically to H and W.
        stride: Step between receptive-field placements.

    Returns:
        Tuple (k, i, j) of integer index arrays: k selects the channel,
        i the row and j the column of each receptive-field element.
    """
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    # BUGFIX: the width check previously used field_height.
    assert (W + 2 * padding - field_width) % stride == 0
    # Floor division keeps the counts integral under Python 3.
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    # Row offset of each kernel element, replicated per channel...
    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    # ...plus the row of each receptive-field placement.
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    # Broadcast to (C*fh*fw, out_h*out_w) index grids.
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """ An implementation of im2col based on some fancy indexing """
    # Zero-pad the two spatial dimensions only.
    pad_spec = ((0, 0), (0, 0), (padding, padding), (padding, padding))
    x_padded = np.pad(x, pad_spec, mode='constant')

    k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding,
                                 stride)

    # Gather every receptive-field patch, then flatten to
    # (C * fh * fw, N * out_h * out_w) columns.
    patches = x_padded[:, k, i, j]
    num_channels = x.shape[1]
    return patches.transpose(1, 2, 0).reshape(
        field_height * field_width * num_channels, -1)
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
                   stride=1):
    """ An implementation of col2im based on fancy indexing and np.add.at """
    N, C, H, W = x_shape
    H_padded, W_padded = H + 2 * padding, W + 2 * padding
    # Accumulate into a zeroed padded buffer; overlapping receptive fields
    # must be summed, hence the unbuffered np.add.at below.
    x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
    k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding,
                                 stride)
    cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
    cols_reshaped = cols_reshaped.transpose(2, 0, 1)
    np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
    # Strip the padding before returning (a [p:-p] slice is wrong for p == 0).
    if padding == 0:
        return x_padded
    return x_padded[:, :, padding:-padding, padding:-padding]

pass
| {
"content_hash": "d21708181cdd13c49c2e916603b42598",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 80,
"avg_line_length": 38.49122807017544,
"alnum_prop": 0.5970829535095715,
"repo_name": "zklgame/CatEyeNets",
"id": "ab21d5310fdec7c91dfdf672a01f0547fe5e11d4",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layers/im2col.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19717098"
},
{
"name": "Python",
"bytes": "142059"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
} |
import os,sys,glob
#rootdir = '..'
clawdir = os.path.expandvars('$CLAW')
rootdir = clawdir
targetfiles = ['*.html','load.js']
oldpat = "http://localhost:50005"
newpat = "http://depts.washington.edu/clawpack/clawpack-4.6.2"
print "Will change ", oldpat
print " to ", newpat
print " in all of ", rootdir
ans = raw_input("Ok? ")
if ans.lower() not in ['y','yes']:
print "Aborting."
sys.exit()
for (dirpath, subdirs, files) in os.walk(rootdir):
currentdir = os.path.abspath(os.getcwd())
os.chdir(os.path.abspath(dirpath))
tfiles = []
for fpat in targetfiles:
for f in glob.glob(fpat):
tfiles.append(f)
for file in tfiles:
infile = open(file,'r')
lines = infile.read()
infile.close()
if lines.find(oldpat) > -1:
lines = lines.replace(oldpat, newpat)
print "Fixed file ",dirpath + '/' + file
else:
print "No change to ",dirpath + '/' + file
outfile = open(file,'w')
outfile.write(lines)
outfile.close()
os.chdir(currentdir)
| {
"content_hash": "bfa65e23a7b90a45f2709a4e432c5dba",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 62,
"avg_line_length": 25.928571428571427,
"alnum_prop": 0.5803489439853077,
"repo_name": "clawpack/clawpack-4.x",
"id": "632bc39ac62e8968f2b0ac6abdb3f448c1200ed5",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/webify.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "1413468"
},
{
"name": "HTML",
"bytes": "1032"
},
{
"name": "Limbo",
"bytes": "135"
},
{
"name": "M",
"bytes": "123"
},
{
"name": "Makefile",
"bytes": "153571"
},
{
"name": "Matlab",
"bytes": "311883"
},
{
"name": "Objective-C",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1242190"
},
{
"name": "Shell",
"bytes": "1579"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
class Button:
    """Debounced GPIO push-button with an overridable press action."""

    def __init__(self, pinNumber, actionOverride=None):
        """
        Args:
            pinNumber: Board-numbered GPIO pin the button is wired to.
            actionOverride: Optional callable taking the channel number,
                invoked on each press instead of the default action.
        """
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(pinNumber, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        # 200 ms software debounce on the rising edge.
        GPIO.add_event_detect(pinNumber, GPIO.RISING, self.action, 200)
        self.pinNumber = pinNumber
        self.__enabled = True

        # BUGFIX: compare against None with 'is not' (PEP 8), not '!='.
        if actionOverride is not None:
            # Override action method
            self.__action = actionOverride

    def activate(self):
        """Re-enable dispatching of press events."""
        self.__enabled = True

    def deactivate(self):
        """Ignore press events until activate() is called."""
        self.__enabled = False

    def action(self, channel):
        """GPIO callback: forward the press to the current action if enabled."""
        if self.__enabled:
            self.__action(channel)

    def __action(self, channel):
        # Default behaviour when no override was supplied.
        print ("Button on channel {0} pressed!".format(channel))

    def setAction(self, actionOverride):
        """Replace the press action after construction."""
        self.__action = actionOverride
| {
"content_hash": "b76bd9ba20dce27312d6d78928972fa9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 23.4375,
"alnum_prop": 0.7,
"repo_name": "baconbum/SportsTicker",
"id": "b9b07eb375d7ee73f0dde79700374bdeaff6c6dc",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SportsTicker/Button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "1230"
},
{
"name": "Python",
"bytes": "39608"
},
{
"name": "Shell",
"bytes": "284"
}
],
"symlink_target": ""
} |
"""
The InteractiveFK is a generic module that use layered 'ribbons' to easily rig anything.
Common use cases are tentacles, ropes, props, clothing, tongue, etc.
This module is also one of the few that support non-uniform scaling, making it perfect for crazy scenarios.
You'll need to provide at least one surface for the system to work.
The surface can be driven by various deformer, however the common use case is to provide a skinned surface.
If you also provide the deformed surface influences as inputs, the InteractiveFK will automatically
detect the surface and it's influence as a 'layer' and will rig it accordingly.
You'll also want to provide the influences for the final deformer (generally a mesh).
Again, the InteractiveFK will reconize that thoses inputs are not related to any surface and
will ensure that they follow the last layer.
Warning:
Please note that to correctly support scaling, all the computation are done in LOCAL space.
This mean that you CANNOT use the skinned surface influences to drive the final mesh.
"""
import pymel.core as pymel
from omtk.core.classCtrl import BaseCtrl
from omtk.core.classModuleMap import ModuleMap
from omtk.core.classModule import Module
from omtk.core import classCtrlModel
from omtk.libs import libRigging
from omtk.libs import libAttr
from omtk.libs import libPython
from omtk.libs import libHistory
from omtk.libs import libPymel
from omtk.libs import libSkinning
def _get_immediate_skincluster(transform):
    """Return the first skinCluster whose output feeds one of the node's
    shapes, or None if there is none."""
    # Work from the transform level, even if a shape was passed in.
    if isinstance(transform, pymel.nodetypes.Shape):
        transform = transform.getParent()

    shapes = transform.getShapes()
    for node in transform.listHistory():
        if not isinstance(node, pymel.nodetypes.SkinCluster):
            continue
        for attr_output in node.outputGeometry:
            downstream_shapes = (
                hist for hist in attr_output.listHistory(future=True)
                if isinstance(hist, pymel.nodetypes.Shape)
            )
            if next(downstream_shapes, None) in shapes:
                return node
class InteractiveFKCtrl(BaseCtrl):
    # Plain BaseCtrl specialization; exists so layers can substitute their
    # own ctrl class via _CLS_CTRL without changing behavior.
    pass
class InteractiveFKCtrlModel(classCtrlModel.CtrlModelCalibratable):
"""
This module allow the controller to follow a follicle and
hijack the influence skinCluster to only consider the local space.
"""
DEFAULT_NAME_USE_FIRST_INPUT = True
def __init__(self, *args, **kwargs):
super(InteractiveFKCtrlModel, self).__init__(*args, **kwargs)
self.follicle = None
self._stack = None
self._grp_bind = None
self._grp_offset = None
self._grp_output = None
def get_default_tm_ctrl(self):
pos_ref = self.jnt.getTranslation(space='world')
tm_ref = pymel.datatypes.Matrix(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
pos_ref.x, pos_ref.y, pos_ref.z, 1
)
return tm_ref
def _get_calibration_reference(self):
return self.follicle
@libPython.memoized_instancemethod
def get_bind_tm(self):
"""
:return: The ctrl transformation that will be used to determine the position of the follicle.
"""
if self.jnt is None:
self.warning("Cannot resolve ctrl matrix with no inputs!")
return None
tm = self.jnt.getMatrix(worldSpace=True)
return tm
@libPython.memoized_instancemethod
def get_bind_pos(self):
return self.get_bind_tm().translate
@libPython.memoized_instancemethod
def get_default_shape(self):
# If a surface we provided in the inputs, use it.
surface = self.get_surface()
if surface:
return surface
# We'll scan all available geometries and use the one with the shortest distance.
meshes = libHistory.get_affected_shapes(self.jnt)
meshes = list(set(meshes) & set(self.rig.get_shapes()))
return next(iter(meshes), None)
def build(self, module, create_follicle=True, pos=None, shape=None, shape_next=None, u_coord=None, v_coord=None,
constraint=True, **kwargs):
"""
:param pos: The position to use when seeking where to create the follicle. Can be resolved automatically if the module have an influence.
:param shape: The shape to create the follicle on. Can be resolved automatically if the module have an influence.
:param u_coord: The U coordinate to use for the follicle. Can be resolved automatically if the module have an influence.
:param v_coord: The V coordinate to use for the follicle. Can be resolved automatically if the module have an influence.
:param constraint: If True, the ctrl will drive the influence via direct connect.
:param kwargs: Any additional keyword argument will be passed to the parent method.
"""
super(InteractiveFKCtrlModel, self).build(
module,
parent=None, # We handle the parenting ourself!
**kwargs
)
nomenclature_rig = self.get_nomenclature_rig()
# Resolve bind position.
if pos is None:
pos = self.get_bind_pos()
#
# Create the 'bind' node that will follow the follicle in translation and something else in rotation.
#
# Create the a group containing the local offset in case we have an hyerarchy to preserve.
self._grp_offset = pymel.createNode(
'transform',
name=nomenclature_rig.resolve('offset'),
parent=self.grp_rig
)
attr_offset_tm = self._grp_offset.matrix
# Create a reference to the previous deformation
self._grp_bind = pymel.createNode(
'transform',
name=nomenclature_rig.resolve('follicle'),
parent=self.grp_rig
)
# self._layer_bind = self._stack.append_layer()
# self._layer_bind.rename(layer_fol_name)
self._grp_bind.setMatrix(self.get_bind_tm())
# self._layer_bind.setParent(self.grp_rig)
attr_bind_tm = self._grp_bind.matrix
attr_bind_tm_inv = self._grp_bind.inverseMatrix
# Compute the parent offset and the deformation offset toguether.
attr_total_offset = libRigging.create_utility_node(
'multMatrix',
name=nomenclature_rig.resolve('getOffset'),
matrixIn=(
attr_offset_tm,
attr_bind_tm,
)
).matrixSum
# Create follicle to track the transform BEFORE the ctrl.
if create_follicle:
# Resolve mesh if necessary.
if not shape:
shape = self.get_default_shape()
if not shape:
raise Exception("Can't resolve mesh to attach to!")
# Resolve uv coords if necessary
if u_coord is None or v_coord is None:
_, u_coord, v_coord = libRigging.get_closest_point_on_shape(shape, pos)
if u_coord is None or v_coord is None:
raise Exception("Can't resolve uv coordinates to use!")
fol_shape = libRigging.create_follicle2(shape, u=u_coord, v=v_coord)
ref_before = fol_shape.getParent()
ref_before.rename(nomenclature_rig.resolve('preCtrl'))
ref_before.setParent(self.grp_rig)
else:
ref_before = pymel.createNode(
'transform',
name=nomenclature_rig.resolve('preCtrl'),
parent=self.grp_rig
)
ref_before.setMatrix(self.get_bind_tm())
pymel.parentConstraint(ref_before, self._grp_bind, maintainOffset=True)
# Create follicle to track the transfort AFTER the ctrl.
# This will be used to position the controller correctly.
if shape_next:
# Resolve uv coords if necessary
_, u_coord, v_coord = libRigging.get_closest_point_on_shape(shape_next, pos)
if u_coord is None or v_coord is None:
raise Exception("Can't resolve uv coordinates to use!")
fol_shape = libRigging.create_follicle2(shape_next, u=u_coord, v=v_coord)
ref_after = fol_shape.getParent()
ref_after.rename(nomenclature_rig.resolve('postCtrl'))
ref_after.setParent(self.grp_rig)
self.follicle = ref_after
#
# Constraint grp_anm
#
# Create an output object that will hold the world position of the ctrl offset.
# This allow us to create direct connection which simplify the dag tree for the animator
# and allow us to easily scale the whole setup to support non-uniform scaling.
util_decompose_offset = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=attr_total_offset
)
pymel.connectAttr(util_decompose_offset.outputTranslate, self.ctrl.offset.translate)
pymel.connectAttr(util_decompose_offset.outputRotate, self.ctrl.offset.rotate)
#
# Create an output group that contain the new joint position
#
grp_scale = pymel.createNode(
'transform',
name=nomenclature_rig.resolve('parent'),
parent=self.grp_rig
)
self._grp_output = pymel.createNode(
'transform',
name=nomenclature_rig.resolve('output'),
parent=grp_scale
)
attr_get_local_tm = libRigging.create_utility_node(
'multMatrix',
matrixIn=(
self.ctrl.matrix,
attr_total_offset
)
).matrixSum
util_decompose_local_tm = libRigging.create_utility_node(
'decomposeMatrix',
inputMatrix=attr_get_local_tm
)
pymel.connectAttr(util_decompose_local_tm.outputTranslate, self._grp_output.translate)
pymel.connectAttr(util_decompose_local_tm.outputRotate, self._grp_output.rotate)
pymel.connectAttr(util_decompose_local_tm.outputScale, self._grp_output.scale)
pymel.parentConstraint(self._grp_output, self.jnt, maintainOffset=True)
pymel.scaleConstraint(self._grp_output, self.jnt, maintainOffset=True)
surface = self.get_surface()
skincluster = _get_immediate_skincluster(surface)
index = libSkinning.get_skin_cluster_influence_objects(skincluster).index(self.jnt)
pymel.connectAttr(attr_bind_tm_inv, skincluster.bindPreMatrix[index], force=True)
    def unbuild(self, **kwargs):
        """
        Tear down the ctrl model rig and drop the reference to the follicle
        transform created at build time.
        """
        super(InteractiveFKCtrlModel, self).unbuild(**kwargs)
        self.follicle = None
class InteractiveFKLayer(ModuleMap):
    """
    One layer of an InteractiveFK setup: a nurbs surface plus the influences
    skinned to it. Builds one ctrl model per influence and wires virtual
    parent offsets between models so the layer supports fk-style behavior.
    """
    _CLS_CTRL_MODEL = InteractiveFKCtrlModel
    _CLS_CTRL = InteractiveFKCtrl
    _NAME_CTRL_ENUMERATE = True  # Same implementation than FK

    def __init__(self, *args, **kwargs):
        super(InteractiveFKLayer, self).__init__(*args, **kwargs)
        # Used for constraining if necessary
        self._grp_parent = None

    def init_model(self, model, inputs, **kwargs):
        """
        Ensure the surface is present in the inputs.

        :raises Exception: If the layer has no surface.
        """
        surface = self.get_surface()
        if not surface:
            raise Exception("Expected surface in inputs.")
        if surface not in inputs:
            inputs.append(surface)
        return super(InteractiveFKLayer, self).init_model(model, inputs, **kwargs)

    def build(self, parent=False, **kwargs):
        """
        Build the layer rig.

        :param parent: Accepted for signature compatibility; the base class
            is always called with parent=False here.
        """
        nomenclature_rig = self.get_nomenclature_rig()
        super(InteractiveFKLayer, self).build(parent=False, **kwargs)
        # Create a group for all the influences
        grp_influences = pymel.createNode(
            'transform',
            name=nomenclature_rig.resolve('jnts'),
            parent=self.grp_rig
        )
        # Only re-parent influences that sit directly under the common parent;
        # deeper children keep their current parent.
        common_parent = libPymel.get_common_parents(self.jnts)
        for jnt in self.jnts:
            if jnt.getParent() == common_parent:
                jnt.setParent(grp_influences)
        # Parent the surface into the surface group
        for surface in self.get_surfaces():
            surface.setParent(self.grp_rig)

    def build_models(self, constraint=False, calibrate=True, **kwargs):
        """
        Build one ctrl model per influence.

        :param constraint: Unused here; kept for signature compatibility.
        :param calibrate: If True, call calibrate() on each model after build.
        :param kwargs: Forwarded to build_model().
        """
        nomenclature_anm = self.get_nomenclature_anm()
        nomenclature_rig = self.get_nomenclature_rig()
        # Create a parent grp
        # This will be used by the models if they need to follow the parent.
        # Normally this is only necessary on the last layer.
        self._grp_parent = pymel.createNode(
            'transform',
            name=nomenclature_rig.resolve('parent'),
            parent=self.grp_rig
        )
        for i, (jnt, model) in enumerate(zip(self.jnts, self.models)):
            # Resolve ctrl name.
            if self._NAME_CTRL_ENUMERATE:
                ctrl_name = nomenclature_anm.resolve('{0:02d}'.format(i + 1))
            else:
                nomenclature = nomenclature_anm + self.rig.nomenclature(jnt.stripNamespace().nodeName())
                ctrl_name = nomenclature.resolve()
            self.build_model(
                model,
                ctrl_name=ctrl_name,
                **kwargs
            )
            if model._grp_output:
                model._grp_output.setParent(self._grp_parent)
            if calibrate:
                model.calibrate()
        # For each models, hijack the 'offset' group in case we have an hierarchy to keep.
        for model in self.models:
            # Resolve the parent influence
            model_parent = model.parent
            if not model_parent:
                continue
            if not isinstance(model_parent, pymel.nodetypes.Joint):
                # NOTE(review): this message formats self.parent, but
                # model_parent is what was just rejected — probably the
                # intended value. Confirm before changing.
                self.warning("Cannot compute offset for parent. Unsupported node type {} for {}".format(
                    type(self.parent), self.parent
                ))
                continue
            # Resolve the parent model
            parent_model = next((model for model in self.models if model.jnt == model_parent), None)
            if not parent_model:
                self.warning("Cannot compute offset for parent. Found no model associated with {}".format(model_parent))
                continue
            self._constraint_model_virtual_offset(model, parent_model)

    def _constraint_model_virtual_offset(self, model, parent_model):
        """
        Create the equivalent of a parent constraint between two models.
        This allow us to support fk-style functionnality.
        :param model: The child ctrl model (_CLS_CTRL_MODEL instance).
        :param parent_model: The parent ctrl model (_CLS_CTRL_MODEL instance).
        """
        nomenclature_rig = self.get_nomenclature_rig()
        model_parent = parent_model.jnt
        parent_ctrl = parent_model.ctrl
        parent_grp_offset = parent_model._grp_offset
        # Inverse of the parent influence world bind pose.
        attr_parent_world_bindpose_tm_inv = libRigging.create_utility_node(
            'inverseMatrix',
            name=nomenclature_rig.resolve('getParentBindPose'),
            inputMatrix=model_parent.bindPose
        ).outputMatrix
        # Child bind pose expressed relative to the parent bind pose.
        attr_local_bindpose_tm = libRigging.create_utility_node(
            'multMatrix',
            name=nomenclature_rig.resolve('getLocalBindPose'),
            matrixIn=(
                model.jnt.bindPose,
                attr_parent_world_bindpose_tm_inv
            )
        ).matrixSum
        attr_local_bindpose_tm_inv = libRigging.create_utility_node(
            'inverseMatrix',
            name=nomenclature_rig.resolve('getLocalBindPoseInv'),
            inputMatrix=attr_local_bindpose_tm
        ).outputMatrix
        # Parent offset transform re-expressed in the child local bind space.
        attr_parent_offset_world_tm = libRigging.create_utility_node(
            'multMatrix',
            matrixIn=(
                attr_local_bindpose_tm,
                parent_grp_offset.matrix,
                attr_local_bindpose_tm_inv,
            )
        ).matrixSum
        #
        # Compute the distorsion introduced by the bind.
        #
        attr_follicle_world_tm = model._grp_bind.matrix
        attr_parent_follicle_world_tm_inv = parent_model._grp_bind.inverseMatrix
        attr_follicle_delta_tm = libRigging.create_utility_node(
            'multMatrix',
            matrixIn=(
                attr_follicle_world_tm,
                attr_parent_follicle_world_tm_inv,
                attr_local_bindpose_tm_inv,
            )
        ).matrixSum
        attr_follicle_delta_tm_inv = libRigging.create_utility_node(
            'inverseMatrix',
            name=nomenclature_rig.resolve('getLocalBindPoseInv'),
            inputMatrix=attr_follicle_delta_tm
        ).outputMatrix
        # Final offset: apply the parent ctrl local transform sandwiched
        # between the bind-pose/delta matrices and their inverses, then
        # compose with the parent offset.
        attr_offset_tm = libRigging.create_utility_node(
            'multMatrix',
            name=nomenclature_rig.resolve('getOffsetTM'),
            matrixIn=(
                attr_local_bindpose_tm,
                attr_follicle_delta_tm,
                parent_ctrl.matrix,
                attr_follicle_delta_tm_inv,
                attr_local_bindpose_tm_inv,
                attr_parent_offset_world_tm
            )
        ).matrixSum
        util_decompose_offset_tm = libRigging.create_utility_node(
            'decomposeMatrix',
            inputMatrix=attr_offset_tm
        )
        pymel.connectAttr(util_decompose_offset_tm.outputTranslate, model._grp_offset.translate)
        pymel.connectAttr(util_decompose_offset_tm.outputRotate, model._grp_offset.rotate)
        pymel.connectAttr(util_decompose_offset_tm.outputScale, model._grp_offset.scale)

    def unbuild(self, **kwargs):
        """
        Unbuild the layer, preserving the surface and influences, and restore
        the skinCluster bindPreMatrix values that were driven by the rig.
        """
        # Ensure surface is not destroyed by the unbuild process.
        surface = self.get_surface()
        if libPymel.is_child_of(surface, self.grp_rig):
            surface.setParent(world=True)
        # Ensure influences are not destroyed by the unbuild process.
        influences = self.jnts
        common_parent = libPymel.get_common_parents(influences)
        for influence in influences:
            if influence.getParent() == common_parent:
                influence.setParent(world=True)
        super(InteractiveFKLayer, self).unbuild(**kwargs)
        # We have connection from rig parts in the skinCluster bindPreMatrix,
        # if we remove the rig, this would reset the bindPreMatrix and result in double transformation.
        # To cancel that, we'll need to reset the bindPreMatrix attributes.
        is_skin_cluster = lambda x: isinstance(x, pymel.nodetypes.SkinCluster)
        for skin_cluster in libHistory.iter_history_backward(surface, key=is_skin_cluster, stop_at_shape=True):
            attr_matrices = skin_cluster.matrix
            attr_pre_matrices = skin_cluster.bindPreMatrix
            num_elements = attr_matrices.numElements()
            for i in range(num_elements):
                attr_matrix = attr_matrices[i]
                attr_pre_matrix = attr_pre_matrices[i]
                attr_pre_matrix.set(attr_matrix.get().inverse())
class InteractiveFK(Module):
    """
    Deformation setup made of stacked InteractiveFKLayer modules.

    Each layer is a skinned nurbs surface whose influences are driven by
    ctrls; each layer's build receives the previous layer's surface. Input
    influences that are not skinned to any layer surface are attached to the
    last layer's surface with follicles.

    NOTE: this file targets Python 2 (iteritems, cmp, sorted(cmp=...)).
    """
    _CLS_LAYER = InteractiveFKLayer
    _VALIDATE_NEED_SURFACE = True

    def __init__(self, *args, **kwargs):
        super(InteractiveFK, self).__init__(*args, **kwargs)
        # This will contain all the layers that take part in the system.
        self.layers = []
        # The group that all surface will be parented to.
        self._grp_surfaces = None
        self._grp_parent = None

    @property
    def parent(self):
        # The module parent is the common parent of the influences that are
        # not skinned to any layer surface.
        # NOTE(review): this shadows whatever 'parent' the base Module
        # exposes — confirm the base treats it as a plain attribute.
        return libPymel.get_common_parents(self._get_unassigned_influences())

    def validate(self, epsilon=0.001):
        """
        Validate the module inputs.

        :param epsilon: Tolerance used when comparing surface transform values.
        :raises Exception: If a required surface is missing, a surface is
            unused, or a surface transform is not identity.
        """
        super(InteractiveFK, self).validate()
        # Ensure that all influences have a common parent for proprer scale handling.
        # if not self._get_parent():
        #     raise Exception("Found no common parents for inputs.")
        surfaces = self.get_surfaces()
        if self._VALIDATE_NEED_SURFACE:
            if not surfaces:
                raise Exception("Missing required input of type NurbsSurface")
        # Ensure there's no useless surface in the inputs.
        unassigned_surfaces = self._get_unassigned_surfaces()
        if unassigned_surfaces:
            raise Exception(
                "Useless surface(s) found: {}".format(', '.join((surface.name() for surface in unassigned_surfaces))))
        # todo: Ensure all surface have an identity matrix
        attr_to_check = {
            'translateX': 0.0,
            'translateY': 0.0,
            'translateZ': 0.0,
            'rotateX': 0.0,
            'rotateY': 0.0,
            'rotateZ': 0.0,
            'scaleX': 1.0,
            'scaleY': 1.0,
            'scaleZ': 1.0,
        }
        for surface in surfaces:
            for attr_name, desired_val in attr_to_check.iteritems():
                attr = surface.attr(attr_name)
                attr_val = attr.get()
                if abs(attr_val - desired_val) > epsilon:
                    raise Exception(
                        "Surface {} have invalid transform! Expected {} for {}, got {}.".format(surface, desired_val,
                                                                                                attr_name, attr_val))
        # Ensure all provided surfaces have the same cv count.
        # num_cvs = None
        # for surface in surfaces:
        #     cur_num_cvs = len(surface.cv)
        #     if num_cvs is None:
        #         num_cvs = cur_num_cvs
        #     elif cur_num_cvs != num_cvs:
        #         raise Exception("Not all input NurbsSurface have the same cv count!")

    @libPython.memoized_instancemethod
    def get_influences_by_surfaces(self):
        """
        Analyze the inputs to resolve what influence are skinned to which surface.
        This allow us to interpret the inputs and create layers accordingly.
        :return: A list of two-sized tuple containing the surface and influence for each layers.
        """
        result = []
        jnts = set(self.jnts)
        # NOTE(review): unassigned_jnts is computed and updated but never
        # read in this method.
        unassigned_jnts = set(self.jnts)
        # Sort surface by deformation history.
        surfaces = self.get_surfaces()
        # Ensure we are working directly with shapes.
        surfaces = [surface.getShape(noIntermediate=True) if isinstance(surface, pymel.nodetypes.Transform) else surface
                    for surface in surfaces]

        # Sort the surface by their construction history.
        # If the surface are already blendshaped toguether, this will work.
        def _fn_compare(obj_a, obj_b):
            hist_a = [hist for hist in obj_a.listHistory() if isinstance(hist, pymel.nodetypes.Shape)]
            hist_b = [hist for hist in obj_b.listHistory() if isinstance(hist, pymel.nodetypes.Shape)]
            if obj_b in hist_a:
                return 1
            elif obj_a in hist_b:
                return -1
            # If nothing works, compare their name...
            # We might get lucky and have correctly named objects like layer0, layer1, etc.
            self.warning("Saw no relationship between {} and {}. Will sort them by name.".format(obj_a, obj_b))
            return cmp(obj_a.name(), obj_b.name())  # cmp: Python 2 only

        surfaces = sorted(surfaces, cmp=_fn_compare)  # sorted(cmp=...): Python 2 only
        for surface in surfaces:
            skincluster = _get_immediate_skincluster(surface)
            if not skincluster:
                self.warning("Found no skinCluster for {}".format(surface))
                continue
            cur_influences = set(libSkinning.get_skin_cluster_influence_objects(skincluster))
            cur_jnts = list(jnts & cur_influences)
            unassigned_jnts -= cur_influences
            result.append(
                (surface.getParent(), cur_jnts)
            )
        return result

    def _get_unassigned_influences(self):
        """
        Return all influences that don't affect any layers.
        These influences will be automatically constrained to the last layer.
        :return: A list of pymel.nodetypes.Joint instances.
        """
        jnts = set(self.jnts)
        for _, influences in self.get_influences_by_surfaces():
            jnts -= set(influences)
        return list(jnts)

    def _get_unassigned_surfaces(self):
        """
        Return all surfaces that are not affected by any influences.
        We currently do nothing with these surfaces.
        :return: A list of pymel.PyNode representing the surfaces.
        """
        surfaces = set(self.get_surfaces())
        for surface, _ in self.get_influences_by_surfaces():
            surfaces.discard(surface)
        return list(surfaces) if surfaces else []

    @staticmethod
    def iter_uvs(num_u, num_v, min_u=0.0, max_u=1.0, min_v=0.0, max_v=1.0):
        """
        Generator for creating multiples objects on a surface.
        Note that if only one influence is provided for U or V space, it will be located at the center.
        :param num_u: The number of influences to create on U space.
        :param num_v: The number of influences to create on V space.
        :return: Yield a tuple of size four that contain the u counter, v counter, u coordinate and v coordinate.
        """
        for u_index in range(num_u):
            if num_u > 1:
                ratio = (u_index / float(num_u - 1))
                u = libRigging.interp_linear(ratio, min_u, max_u)
            else:
                # NOTE(review): a single sample lands at the surface center
                # (0.5), not at the middle of min_u..max_u — confirm intended.
                u = 0.5
            for v_index in range(num_v):
                if num_v > 1:
                    ratio = (v_index / float(num_v - 1))
                    v = libRigging.interp_linear(ratio, min_v, max_v)
                else:
                    v = 0.5
                yield u_index, v_index, u, v

    def create_layer_from_surface(self, num_u=3, num_v=1, min_u=0.0, max_u=1.0, min_v=0.0, max_v=1.0,
                                  format_str='U{:02d}V{:02d}', cls_ctrl=None, suffix=None):
        """
        Create a new layer module by duplicating the reference surface and generating influences using predefined rules.
        Note that this does not add it to the layer stack.
        :param num_u: How much influences to generate in the surface U space.
        :param num_v: How much influences to generate in the surface V space.
        :param format_str: An str instance that drive how the influence are named using python string formatting.
        :param suffix: The suffix to add to the module name.
        :return: An instance of the module class defined in self._CLS_LAYER.
        """
        # Create the module first so we can access it's nomenclature.
        # We'll add the inputs afterward.
        # module = self.init_module(self._CLS_LAYER, None, suffix=suffix)
        nomenclature_jnt = self.get_nomenclature_jnt()
        nomenclature_rig = self.get_nomenclature_rig()
        # Create surface
        surface = self._create_surface(
            name=nomenclature_rig.resolve(suffix, 'surface'),
        )
        jnts = []
        for u_index, v_index, u_coord, v_coord in self.iter_uvs(num_u, num_v, min_u=min_u, max_u=max_u, min_v=min_v,
                                                                max_v=max_v):
            pos = libRigging.get_point_on_surface_from_uv(surface, u_coord, v_coord)
            jnt = pymel.createNode(
                'joint',
                name=nomenclature_jnt.resolve(suffix, format_str.format(u_index, v_index))
            )
            jnt.setTranslation(pos)
            jnts.append(jnt)
        # Assign a skinCluster on the surface using the influences.
        pymel.skinCluster(jnts, surface, mi=3)
        module = self.init_layer(None, inputs=jnts + [surface], cls_ctrl=cls_ctrl, suffix=suffix)
        return module

    def init_layer(self, inst, inputs=None, suffix=None, cls_layer=None, cls_ctrl=None):
        """
        Initialize (or re-use) a layer module instance.

        :param inst: An existing layer instance to re-use, or None.
        :param cls_layer: Layer class override; defaults to self._CLS_LAYER.
        :param cls_ctrl: Optional ctrl class override applied to the layer.
        :return: The initialized layer module.
        """
        cls_layer = cls_layer or self._CLS_LAYER
        module = self.init_module(cls_layer, inst, inputs=inputs, suffix=suffix)
        if cls_ctrl:
            module._CLS_CTRL = cls_ctrl
        return module

    def _init_layers(self):
        """
        Initialize any preset of layer configuration.
        Override this if you define a custom Module from this one.
        """
        # Build layers from inputs.
        data = self.get_influences_by_surfaces()
        # Ensure we have at least as many slots allocated that we have groups.
        num_layers = len(self.layers)
        num_data = len(data)
        if num_layers < num_data:
            libPython.resize_list(self.layers, num_data)
        self.debug('Found {} layer groups'.format(len(data)))
        for i, sub_data in enumerate(data):
            self.debug('Creating layer {} using {}'.format(i + 1, sub_data))
            surface, influences = sub_data
            self.layers[i] = self.init_layer(self.layers[i], inputs=[surface] + influences,
                                             suffix='layer{}'.format(i + 1))

    def _build_layers(self, ctrl_size_max=None, ctrl_size_min=None):
        """
        Build each layer, feeding each one the surface of the previous layer.
        Ctrl sizes are linearly interpolated from ctrl_size_max (first layer)
        down to ctrl_size_min (last layer).
        """
        layers = self.layers
        num_layers = len(layers)
        for i in range(num_layers):
            prev_layer = layers[i - 1] if i > 0 else None
            curr_layer = layers[i]
            # Define desired ctrl size
            ratio = float(i) / (num_layers - 1) if num_layers > 1 else 1
            ctrl_size = libRigging.interp_linear(ratio, ctrl_size_max,
                                                 ctrl_size_min) if ctrl_size_max and ctrl_size_min else None
            shape = prev_layer.get_surface() if prev_layer else None
            shape_skinned = curr_layer.get_surface()
            create_follicle = True if prev_layer else None
            curr_layer.build(
                create_follicle=create_follicle,
                shape=shape,
                shape_next=shape_skinned,
                ctrl_size=ctrl_size,
                parent=False
            )
            # Only the last layer follows the module parent.
            is_last_layer = i == num_layers - 1
            if is_last_layer and self.parent:
                curr_layer.parent_to(self.parent)
            curr_layer.grp_anm.setParent(self.grp_anm)
            curr_layer.grp_rig.setParent(self.grp_rig)

    def _create_surface(self, ref_surface=None, parent=None, name=None, **kwargs):
        """
        Create a new surface for layer user.
        The resulting surface will be 'safe' to use with no scale or locked attributes.
        :param ref_surface: Surface to duplicate; defaults to the module surface.
        :param parent: Optional parent for the new surface.
        :param name: Optional name for the new surface.
        :param kwargs: Forwarded to pymel.duplicate.
        :return: The duplicated surface transform.
        """
        if ref_surface is None:
            ref_surface = self.get_surface()
        new_surface = pymel.duplicate(ref_surface, **kwargs)[0]
        if parent:
            new_surface.setParent(parent)
        if name:
            new_surface.rename(name)
        libAttr.unlock_trs(new_surface)
        pymel.makeIdentity(new_surface, apply=True, scale=True)
        return new_surface

    def _get_default_ctrl_size(self):
        # Use the smallest surface dimension as the baseline ctrl size.
        surface = self.get_surface()
        length_u, length_v = libRigging.get_surface_length(surface)
        return min(length_u, length_v)

    def build(self, ctrl_size_max=None, ctrl_size_min=None, parent=True, **kwargs):
        """
        :param ctrl_size_max: Used to automatically size layer ctrls. Define the maximum size (applied on first layer)
        :param ctrl_size_min: Used to automatically size layer ctrls. Define the minimum size (applied on last layer)
        :param parent: Redefined to compensate for bad design. Identical implementation than base class.
        :param kwargs: Any keyword argument will be forwarded to the base method.
        """
        super(InteractiveFK, self).build(parent=None, **kwargs)
        nomenclature_rig_grp = self.get_nomenclature_rig_grp()
        nomenclature_jnt = self.get_nomenclature_jnt()
        # Create a group that we will parent all surfaces to.
        self._grp_surfaces = pymel.createNode(
            'transform',
            name=nomenclature_rig_grp.resolve('surfaces'),
            parent=self.grp_rig
        )
        self._init_layers()
        # Resolve default ctrl_size
        if ctrl_size_min is None or ctrl_size_max is None:
            val = self._get_default_ctrl_size()
            ctrl_size_max = val * 0.25
            ctrl_size_min = ctrl_size_max / float(len(self.layers))
            self.info('Default ctrl size is adjusted from bettwen {} at {}'.format(ctrl_size_min, ctrl_size_max))
        self._build_layers(
            ctrl_size_max=ctrl_size_max,
            ctrl_size_min=ctrl_size_min
        )
        # Create a group that represent the original parent of everything.
        # This allow use to supported non-uniform scaling by using direct connections instead of parent/scaleConstraint.
        parent_obj = self.get_parent_obj()
        self._grp_parent = pymel.createNode(
            'transform',
            name=nomenclature_rig_grp.resolve('parent'),
            parent=self.grp_rig
        )
        # Rig parenting
        if parent_obj:
            self._grp_parent.setMatrix(parent_obj.getMatrix(worldSpace=True))
        # For each influence, create a follicle that will follow the final mesh.
        unassigned_influences = self._get_unassigned_influences()
        last_surface = self.layers[-1].get_surface()
        if unassigned_influences and last_surface:
            grp_follicles = pymel.createNode(
                'transform',
                name=nomenclature_rig_grp.resolve('follicles'),
                parent=self.grp_rig,
            )
            for i, jnt in enumerate(unassigned_influences):
                nomenclature = nomenclature_jnt + self.rig.nomenclature(jnt.stripNamespace().nodeName())
                # Get the final LOCAL transformation of the influence.
                # If we have a parent, we'll want to convert it to WORLD transformation.
                pos = jnt.getTranslation(space='world')
                _, u, v = libRigging.get_closest_point_on_surface(last_surface, pos)
                fol_shape = libRigging.create_follicle2(last_surface, u, v, connect_transform=True)
                fol_transform = fol_shape.getParent()
                fol_transform.rename(nomenclature.resolve())
                fol_transform.setParent(grp_follicles)
                # Connect the influence.
                # Note that we don't apply any scale constraining since we assume that all influence have
                # the same common parent that drive the scale.
                if parent_obj:
                    # Use an extra object to match original influence transform.
                    grp_output = pymel.createNode(
                        'transform',
                        name=nomenclature_jnt.resolve('output{}'.format(i)),
                        parent=self._grp_parent
                    )
                    grp_output.setMatrix(jnt.getMatrix(worldSpace=True), worldSpace=True)
                    pymel.parentConstraint(fol_transform, grp_output, maintainOffset=True)
                    # Hack: Reset joint orient so our direct connection work...
                    # todo: use compose matrix?
                    if isinstance(jnt, pymel.nodetypes.Joint):
                        jnt.jointOrientX.set(0.0)
                        jnt.jointOrientY.set(0.0)
                        jnt.jointOrientZ.set(0.0)
                    libAttr.connect_transform_attrs(grp_output, jnt, sx=False, sy=False, sz=False)
                else:
                    pymel.parentConstraint(fol_transform, jnt, maintainOffset=True)
        # Manually parent the module with support for scaling.
        if parent_obj and parent_obj != self.grp_anm:
            pymel.parentConstraint(parent_obj, self.grp_anm, maintainOffset=True)
            pymel.scaleConstraint(parent_obj, self.grp_anm, maintainOffset=True)

    def unbuild(self, **kwargs):
        """
        Unbuild every built layer, then the module itself.
        """
        for layer in self.layers:
            if layer.is_built():
                layer.unbuild()
        super(InteractiveFK, self).unbuild(**kwargs)
def register_plugin():
    """
    Plugin registration hook: return the module class exposed by this file.
    """
    return InteractiveFK
| {
"content_hash": "fca642201d8d1abd12d9f82fd0efae87",
"timestamp": "",
"source": "github",
"line_count": 865,
"max_line_length": 145,
"avg_line_length": 41.49248554913295,
"alnum_prop": 0.6029088072218662,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "897b2c62df9480ebea7a92d11a2fc66431123727",
"size": "35891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk/modules/rigInteractiveFK.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
"""
This module contains the following classes:
Context
InbandContext
BlindContext
A context represents the set of conditions required to correctly perform
an SQL injection.
"""
from random import choice
class Context:
    """
    Context class

    This class is used to store every info related to the injection context:
    target URL and parameters, injection method (inband or blind), vulnerable
    field type, SQL comment sequence and string delimiter, HTTP options
    (SSL, headers, cookie) and collection options (smooth, multithread).
    """

    # Vulnerable field types.
    FIELD_STR = 'string'
    FIELD_INT = 'int'

    # Injection methods.
    INBAND = 'inband'
    BLIND = 'blind'

    def __init__(self, method=INBAND, field_type=FIELD_STR, url='',
                 params=None, target=None, comment='/*', strdelim="'", union_tag=None,
                 union_fields=(), default='0', union_target=-1, use_ssl=False,
                 smooth=False, headers=None, cookie=None, multithread=True,
                 truncate=False, encode_str=False):
        """
        Default injection context constructor.

        :param method: injection method (Context.INBAND or Context.BLIND)
        :param field_type: vulnerable field type (FIELD_STR or FIELD_INT)
        :param union_tag: tag wrapped around extracted data; a random
            32-letter tag is generated when None.
        """
        # injection method
        self.__method = method
        self.__url = url
        self.__params = params
        self.__target = target
        self.__comment = comment
        self.__str_delim = strdelim
        self.__default = default
        self.__use_ssl = use_ssl
        self.__encode_str = encode_str
        self.__truncate = truncate
        self.__field_type = field_type
        self.__smooth = smooth
        self.__headers = headers
        self.__cookie = cookie
        self.__multithread = multithread
        # inband specific
        self.__union_fields = union_fields
        self.__union_target = union_target
        if union_tag is not None:
            self.__union_tag = union_tag
        else:
            # Random marker, unlikely to collide with legitimate page content.
            self.__union_tag = ''.join([choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(32)])

    def get_url(self):
        """
        Returns the target URL
        """
        return self.__url

    def set_url(self, url):
        """
        Set the target URL
        """
        self.__url = url

    def set_field_type(self, field_type):
        """
        Set field type (FIELD_INT or FIELD_STR)
        """
        self.__field_type = field_type

    def get_field_type(self):
        """
        Get field type (FIELD_INT or FIELD_STR)
        """
        return self.__field_type

    def enable_string_encoding(self, enabled):
        """
        Enable/disable SQL string encoding (used to evade anti-quote
        functions or WAF).
        """
        self.__encode_str = enabled

    def enable_truncate(self, enabled):
        """
        Enable/disable request truncation (comment out the rest of the
        SQL query).
        """
        self.__truncate = enabled

    def require_truncate(self):
        """
        Return True if query truncation is enabled, False otherwise.
        """
        return self.__truncate

    def require_string_encoding(self):
        """
        Return True if string encoding is required, False otherwise.
        """
        return self.__encode_str

    def enable_ssl(self, enabled):
        """
        Enable/disable SSL support.
        """
        self.__use_ssl = enabled

    def use_ssl(self):
        """
        Return True if SSL must be used, False otherwise.
        """
        return self.__use_ssl

    def set_smooth(self, enabled=True):
        """
        Enable/disable smooth mode
        """
        self.__smooth = enabled

    def is_smooth(self):
        """
        Determine if smooth mode must be used or not.
        """
        return self.__smooth

    def set_multithread(self, enabled=True):
        """
        Enable/disable multithreading.
        """
        self.__multithread = enabled

    def is_multithread(self):
        """
        Determine if multithreading must be used or not.
        """
        return self.__multithread

    def has_headers(self):
        """
        Determine if extra headers must be used
        """
        return self.__headers is not None

    def set_headers(self, headers):
        """
        Set extra headers.

        headers: dict of extra headers (mostly HTTP)
        """
        self.__headers = headers

    def set_header(self, header, value):
        """
        Set a given header

        header: header name (string)
        value: header value (usually, string)
        """
        if self.__headers is not None:
            self.__headers[header] = value
        else:
            self.__headers = {header: value}

    def get_headers(self):
        """
        Get all headers
        """
        return self.__headers

    def set_cookie(self, cookie):
        """
        Set HTTP cookie.

        cookie: cookie value.
        """
        self.__cookie = cookie

    def get_cookie(self):
        """
        Get cookie.
        """
        return self.__cookie

    def set_params(self, params, target=None):
        """
        Set parameters and target parameter.

        params: dict of parameters
        target: target parameter
        """
        self.__params = params
        self.__target = target

    def get_params(self):
        """
        Retrieve parameters.
        """
        return self.__params

    def get_target_param(self):
        """
        Retrieve the target parameter
        """
        return self.__target

    def get_comment(self):
        """
        Get comment sequence
        """
        return self.__comment

    def set_comment(self, comment):
        """
        Set comment sequence
        """
        self.__comment = comment

    def get_string_delimiter(self):
        """
        Retrieve string delimiter
        """
        return self.__str_delim

    def set_string_delimiter(self, delim):
        """
        Set string delimiter

        delim: string delimiter
        """
        self.__str_delim = delim

    def set_default_value(self, default):
        """
        Set default value to use in the SQL code

        default: default value (string in case of FIELD_STR,
        int in case of FIELD_INT)
        """
        self.__default = default

    def get_default_value(self):
        """
        Retrieve default value
        """
        return self.__default

    def set_inband_fields(self, fields):
        """
        Set inband fields

        Inband fields are quite special: they are described with a single
        string with these possible characters:

        - s: specify a string field
        - i: specify an integer field

        This is used to be compliant with Oracle, Mssql, and other DBMS.

        Example:

            context.set_inband_fields('sssisi')

        declares 6 fields, [string, string, string, integer, string, integer]
        """
        self.__union_fields = fields

    def get_inband_fields(self):
        """
        Retrieve inband fields types
        """
        return self.__union_fields

    def get_inband_tag(self):
        """
        Get inband tag

        The inband tag is a string used to wrap the extracted string in order
        to extract it easily. This tag is randomly generated when an instance
        of the Context class is created.
        """
        return self.__union_tag

    def set_inband_target(self, target):
        """
        Sets inband target field index
        """
        self.__union_target = int(target)

    def get_inband_target(self):
        """
        Retrieve inband target field index
        """
        return self.__union_target

    def is_blind(self):
        """
        Determines if the actual injection context is blind
        """
        return (self.__method == Context.BLIND)

    def is_inband(self):
        """
        Determines if the actual injection context is inband
        """
        return (self.__method == Context.INBAND)

    def in_string(self):
        """
        Determines if the target field is a string
        """
        return (self.__field_type == Context.FIELD_STR)

    def in_int(self):
        """
        Determines if the target field is an int
        """
        return (self.__field_type == Context.FIELD_INT)

    def use_blind(self):
        """
        Switch to blind injection
        """
        self.__method = Context.BLIND

    def use_inband(self):
        """
        Switch to inband injection
        """
        self.__method = Context.INBAND
class InbandContext(Context):
    """
    Context preconfigured for inband injection.
    """

    def __init__(self, **kwargs):
        # Override any caller-supplied method with the inband one.
        forced = dict(kwargs, method=Context.INBAND)
        Context.__init__(self, **forced)
class BlindContext(Context):
    """
    Context preconfigured for blind injection.
    """

    def __init__(self, **kwargs):
        # Override any caller-supplied method with the blind one.
        forced = dict(kwargs, method=Context.BLIND)
        Context.__init__(self, **forced)
| {
"content_hash": "b493a28a61695b4baac599c1efac4627",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 97,
"avg_line_length": 24.8005249343832,
"alnum_prop": 0.5443962324055456,
"repo_name": "sysdream/pysqli",
"id": "a2f0e62dfc955d82e937dc8f6340a4f00b55d561",
"size": "9479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysqli/core/context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "926"
},
{
"name": "Python",
"bytes": "78925"
}
],
"symlink_target": ""
} |
class BaseFriendsProvider():
    """
    Interface that friend-fetching backends must implement.

    Both methods are abstract and raise NotImplementedError until a
    subclass overrides them.
    """

    def fetch_friends(self, user):
        """Return the friends of the given user. Abstract."""
        raise NotImplementedError("Should have implemented this")

    def fetch_friend_ids(self, user):
        """Return the ids of the given user's friends. Abstract."""
        raise NotImplementedError("Should have implemented this")
| {
"content_hash": "34a040278d6cc0d646778f4e97f12a97",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.668918918918919,
"repo_name": "laplacesdemon/django-social-friends-finder",
"id": "7b87067b3e0facd78a734a89eb7d8c8213533477",
"size": "296",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "social_friends_finder/backends/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52159"
}
],
"symlink_target": ""
} |
import os
#import socket
import json
import time
import urllib2
import re
import stat
import common
from instagram.client import InstagramAPI
from instagram.bind import InstagramAPIError
from instagram.bind import InstagramClientError
class BaseCrawler:
    """
    Common base for crawler implementations.

    Stores the configuration dictionary and sets up the echo handler.
    Subclasses override crawl().
    """

    def __init__(self, configurationsDictionary):
        self._extractConfig(configurationsDictionary)
        self.echo = common.EchoHandler(self.config["echo"])

    def _extractConfig(self, configurationsDictionary):
        # Keep a reference to the raw dictionary and guarantee an "echo"
        # section exists so EchoHandler always receives a dict.
        self.config = configurationsDictionary
        self.config.setdefault("echo", {})

    def crawl(self, resourceID, filters):
        # Default implementation: collect nothing.
        return (None, None, None)
class UsersCrawlerDB(BaseCrawler):
    """
    Crawl Instagram user profiles: fetch the user JSON through the API and
    the public profile HTML page, store both on disk and forward the data
    to the MySQL batch insert filter.
    """
    # State codes:
    # valid => Successful collection
    # not_allowed => APINotAllowedError - you cannot view this resource
    # not_found => APINotFoundError - this user does not exist
    def crawl(self, resourceID, filters):
        """
        Collect data for a single user.

        :param resourceID: Instagram user id.
        :param filters: filter chain data; filters[0]["data"]["application"]
            must hold the Instagram app credentials.
        :return: (resourceInfo, extraInfo, None).
        """
        # Extract filters
        application = filters[0]["data"]["application"]
        self.echo.out(u"ID: %s (App: %s)." % (resourceID, application["name"]))
        # Get authenticated API object
        clientID = str(application["clientid"])
        clientSecret = str(application["clientsecret"])
        api = InstagramAPI(client_id = clientID, client_secret = clientSecret)
        # Configure data storage directory (sharded by id modulo 1000).
        usersBaseDir = "../../data-update/users"
        usersDataDir = os.path.join(usersBaseDir, str(int(resourceID) % 1000))
        try: os.makedirs(usersDataDir)
        except OSError: pass  # directory already exists
        # Initialize return variables
        resourceInfo = {"current_state": "valid"}
        extraInfo = {"InstagramAppFilter": {}, "MySQLBatchInsertFilter": []}
        # Execute collection
        while True:
            try:
                userInfo = api.user(user_id=resourceID, return_json=True)
                request = urllib2.Request("http://instagram.com/%s" % userInfo["username"])
                userPage = urllib2.urlopen(request).read()
            except (InstagramAPIError, InstagramClientError) as error:
                if (error.status_code == 400):
                    if (error.error_type == "APINotAllowedError"):
                        resourceInfo["current_state"] = "not_allowed"
                        break
                    elif (error.error_type == "APINotFoundError"):
                        resourceInfo["current_state"] = "not_found"
                        break
                    # NOTE(review): a 400 with any other error_type falls
                    # through and the loop retries forever — confirm intended.
                else: raise
            else:
                userInfoFilePath = os.path.join(usersDataDir, "%s.user" % resourceID)
                userPageFilePath = os.path.join(usersDataDir, "%s.html" % resourceID)
                with open(userInfoFilePath, "w") as output: json.dump(userInfo, output)
                with open(userPageFilePath, "w") as output: output.write(userPage)
                # Owner rw, group rw, world read.
                os.chmod(userInfoFilePath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
                os.chmod(userPageFilePath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH)
                # Send user information back to batch insert filter
                # (flatten the nested "counts" dict into scalar columns).
                userInfo["counts_media"] = userInfo["counts"]["media"]
                userInfo["counts_follows"] = userInfo["counts"]["follows"]
                userInfo["counts_followed_by"] = userInfo["counts"]["followed_by"]
                del userInfo["counts"]
                extraInfo["MySQLBatchInsertFilter"].append(userInfo)
                resourceInfo["is_verified"] = (re.search("\"is_verified\":true", userPage) is not None)
                break
        return (resourceInfo, extraInfo, None)
class UsersCrawlerFile(BaseCrawler):
    """Collects an Instagram user's profile via the API and stores it on disk.

    Response codes:
      3 => Successful collection
     -4 => APINotAllowedError - you cannot view this resource
     -5 => APINotFoundError - this user does not exist
    """
    def crawl(self, resourceID, filters):
        self.echo.out(u"User ID received: %s." % resourceID)
        # Extract filters
        application = filters[0]["data"]["application"]
        # Get authenticated API object
        clientID = application["clientid"]
        clientSecret = application["clientsecret"]
        api = InstagramAPI(client_id=clientID, client_secret=clientSecret)
        self.echo.out(u"App: %s." % str(application["name"]))
        # Configure exception handling: exponential backoff starting at 2**3 seconds.
        maxNumberOfRetrys = 8
        retrys = 0
        sleepSecondsMultiply = 3
        # Configure data storage directory, sharded by resourceID modulo 1000.
        usersBaseDir = "../../data/users"
        usersDataDir = os.path.join(usersBaseDir, str(resourceID % 1000))
        try: os.makedirs(usersDataDir)
        except OSError: pass  # directory already exists
        # Initialize return variables
        responseCode = 3
        #extraInfo = {"InstagramAppFilter": {}, "SaveResourcesFilter": []}
        extraInfo = {"InstagramAppFilter": {}}
        # Execute collection
        while True:
            try:
                userInfo = api.user(user_id=resourceID, return_json=True)
            except (InstagramAPIError, InstagramClientError) as error:
                if error.status_code == 400:
                    if error.error_type == "APINotAllowedError":
                        responseCode = -4
                        break
                    elif error.error_type == "APINotFoundError":
                        responseCode = -5
                        break
                    else:
                        # Unknown 400-class error: re-raise instead of looping
                        # again with no sleep (matches the sibling crawler's
                        # handling of unexpected 400 errors).
                        raise
                else:
                    if retrys < maxNumberOfRetrys:
                        sleepSeconds = 2 ** sleepSecondsMultiply
                        self.echo.out(u"API call error. Trying again in %02d second(s)." % sleepSeconds, "EXCEPTION")
                        time.sleep(sleepSeconds)
                        sleepSecondsMultiply += 1
                        retrys += 1
                    else:
                        raise SystemExit("Maximum number of retrys exceeded.")
            else:
                # Context manager guarantees the file is closed even if
                # json.dump raises (the original leaked the handle then).
                with open(os.path.join(usersDataDir, "%s.user" % resourceID), "w") as output:
                    json.dump(userInfo, output)
                # Extract user counts to send back to SaveResourcesFilter
                # userCounts = {"counts_media": userInfo["counts"]["media"],
                #               "counts_follows": userInfo["counts"]["follows"],
                #               "counts_followedby": userInfo["counts"]["followed_by"]}
                # extraInfo["SaveResourcesFilter"].append((resourceID, userCounts))
                break
        # Get rate remaining to send back to InstagramAppFilter
        extraInfo["InstagramAppFilter"]["appname"] = application["name"]
        extraInfo["InstagramAppFilter"]["apprate"] = int(api.x_ratelimit_remaining)
        return ({#"crawler_name": socket.gethostname(),
                 "response_code": responseCode},
                extraInfo,
                None)
| {
"content_hash": "e88d4f1d2297f68ca7f4e41e3a709e91",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 117,
"avg_line_length": 43.7239263803681,
"alnum_prop": 0.564753753332398,
"repo_name": "fghso/instagram-crawler",
"id": "a419e002d8711089e4965f716f30d51cbb7a0af6",
"size": "7157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170483"
}
],
"symlink_target": ""
} |
import platform
import os

# Absolute path of this source file (the original's trailing "[0:]" slice was
# a no-op copy and has been removed).
file_path = os.path.abspath(__file__)
if platform.system() == 'Windows':
    home = os.environ['USERPROFILE']
    sep = '\\'
else:
    home = os.environ['HOME']
    sep = '/'
# Per-user working directory for GeometrA state.
GEOMETRA_ROOT = home + sep + '.GeometrA'
# Strip the last three path components (this file's name plus two parent
# directories), then descend into the bundled "resources" directory.
RESOURCE_PATH = file_path[0:file_path.rindex(sep, 0, file_path.rindex(sep, 0, file_path.rindex(sep)))] + sep + 'resources'
if not os.path.isdir(GEOMETRA_ROOT):
    # A stale regular file with the same name would block mkdir; replace it.
    if os.path.isfile(GEOMETRA_ROOT):
        os.remove(GEOMETRA_ROOT)
    os.mkdir(GEOMETRA_ROOT)
| {
"content_hash": "fdf6d0fd8b6083d939026a8335e3c8a8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 125,
"avg_line_length": 38.625,
"alnum_prop": 0.6294498381877023,
"repo_name": "NTUTVisualScript/Visual_Script",
"id": "92bee4214f6edad54213a330b52b7dc0e7f2ddd4",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GeometrA/src/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "255"
},
{
"name": "CSS",
"bytes": "139173"
},
{
"name": "HTML",
"bytes": "1915049"
},
{
"name": "JavaScript",
"bytes": "5887938"
},
{
"name": "Python",
"bytes": "379530"
},
{
"name": "Shell",
"bytes": "3908"
}
],
"symlink_target": ""
} |
from flask import Blueprint
# Blueprint collecting the API-root endpoints. The view modules imported
# below attach their routes to `apiroot`, so this import must come *after*
# the blueprint exists (deliberate circular-import pattern).
apiroot = Blueprint("apiroot", __name__)
from . import auth, root  # noqa: E402  -- must follow the Blueprint definition
| {
"content_hash": "95a707f827214a26bcaefc48aeb76446",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 19.2,
"alnum_prop": 0.71875,
"repo_name": "lsst-sqre/ltd-keeper",
"id": "b551386da4a2a4987f8e518df36f01400b417205",
"size": "96",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "keeper/apiroot/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1137"
},
{
"name": "Dockerfile",
"bytes": "1764"
},
{
"name": "Jinja",
"bytes": "2878"
},
{
"name": "Makefile",
"bytes": "3607"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "436185"
},
{
"name": "Shell",
"bytes": "7523"
}
],
"symlink_target": ""
} |
import pyrax
from raxas.core_plugins.base import PluginBase
from datetime import datetime, timedelta
import logging
from pyrax.exceptions import NotFound
class Raxclb(PluginBase):
    """Rackspace cloud load balancer scaling plugin.

    Configured thresholds are interpreted per active server: the effective
    threshold at decision time is the configured value multiplied by the
    scaling group's active capacity.
    """
    def __init__(self, scaling_group):
        super(Raxclb, self).__init__(scaling_group)
        self.scaling_group = scaling_group
        # Plugin section of the scaling group's config.
        # NOTE(review): assumes the 'raxclb' section exists -- .get() on a
        # missing section returns None and the lines below would fail.
        config = scaling_group.plugin_config.get(self.name)
        self.scale_up_threshold = config.get('scale_up_threshold', 50)
        self.scale_down_threshold = config.get('scale_down_threshold', 1)
        self.check_type = config.get('check_type', '')
        self.lb_ids = config.get('loadbalancers', [])
        # Usage-history window in hours; now configurable (default keeps the
        # previously hard-coded value of 2).
        self.check_time = config.get('check_time', 2)

    @property
    def name(self):
        return 'raxclb'

    def make_decision(self):
        """
        This function decides to scale up or scale down
        :returns: positive sum -> scale up votes
                  0             -> do nothing
                  negative sum  -> scale down votes
                  None          -> no data available
                  (one vote per load balancer, summed)
        """
        logger = logging.getLogger(__name__)
        clb = pyrax.cloud_loadbalancers
        if not self.lb_ids:
            # No explicit list configured: fall back to the load balancers
            # attached to the group's launch configuration.
            launch_config = self.scaling_group.launch_config
            if launch_config is None:
                return None
            try:
                self.lb_ids = [lb.get('loadBalancerId') for lb
                               in launch_config.get('load_balancers')]
            except TypeError:
                logger.error('No loadbalancer found, please either define a '
                             'loadbalancer to check or add one to the scaling group.')
                return None
        start_time = datetime.utcnow() - timedelta(hours=int(self.check_time))
        results = []
        active_server_count = self.scaling_group.state['active_capacity']
        # Use locals for the scaled thresholds: the original mutated
        # self.scale_*_threshold here, so repeated make_decision() calls
        # compounded the per-server multiplication.
        scale_up_threshold = self.scale_up_threshold * active_server_count
        scale_down_threshold = self.scale_down_threshold * active_server_count
        if self.check_type.upper() == 'SSL':
            hist_check = 'averageNumConnectionsSsl'
            cur_check = 'currentConnSsl'
        else:
            hist_check = 'averageNumConnections'
            cur_check = 'currentConn'
        for lb in self.lb_ids:
            try:
                check_clb = clb.get(lb)
            except NotFound:
                logger.error('Loadbalancer specified does not exist')
                return None
            usage = check_clb.get_usage(start=start_time)
            current_usage = check_clb.get_stats()
            records = [record.get(hist_check)
                       for record in usage.get('loadBalancerUsageRecords')]
            try:
                current_conn = current_usage.get(cur_check)
                average_historical = sum(records) / len(records)
                # Weight the instantaneous reading above the historical mean.
                average = ((current_conn * 1.5) + average_historical) / 2
            except ZeroDivisionError:
                # No usage history yet: use the instantaneous value alone.
                average = current_usage.get(cur_check)
            if average > scale_up_threshold:
                results.append(1)
                logger.info("Raxclb reports scale up for lb %s", lb)
            elif average < scale_down_threshold:
                results.append(-1)
                logger.info("Raxclb reports scale down for lb %s", lb)
            else:
                results.append(0)
                logger.info("Raxclb reports normal for lb %s", lb)
        return sum(results)
| {
"content_hash": "4b596ec03cdeebfabfcd0d208c1a1d7e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 86,
"avg_line_length": 33.160377358490564,
"alnum_prop": 0.5644381223328592,
"repo_name": "boxidau/rax-autoscaler",
"id": "5ba921f930c8f4e47b8d274ce3f61ba723939c05",
"size": "4211",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "raxas/core_plugins/raxclb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "27292"
},
{
"name": "JavaScript",
"bytes": "1463"
},
{
"name": "Makefile",
"bytes": "8056"
},
{
"name": "Python",
"bytes": "102232"
}
],
"symlink_target": ""
} |
from _external import *
from boost import *
boost_gil = HeaderChecker( 'boost_gil', ['boost/gil/gil_all.hpp'], 'c++',
dependencies=[boost] )
| {
"content_hash": "efd32daf3c1313dd7c4f0e84e7162fde",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 73,
"avg_line_length": 33.8,
"alnum_prop": 0.5739644970414202,
"repo_name": "tuttleofx/sconsProject",
"id": "f1b27ba2d427f33fa1103b23f14d3f95c0dc357c",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoconf/boost_gil.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150692"
}
],
"symlink_target": ""
} |
import logging
from django_openid_auth.views import login_begin as django_login_begin, login_complete
from desktop.lib.django_util import render
from django.core import urlresolvers
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
import libopenid.conf
from libopenid.backend import OpenIDBackend
from libopenid.forms import OpenIDLoginFormExt
__all__ = ['login_begin', 'login_complete']
def login_begin(request):
    """Entry point for OpenID login.

    When a centralized OpenID SSO endpoint is configured via
    settings.OPENID_SSO_SERVER_URL, a GET renders the login page directly
    (the identity field is hidden since the endpoint is fixed); everything
    else is delegated to django_openid_auth's login_begin with this app's
    template and extended form.
    """
    redirect_to = request.REQUEST.get('next', '/')
    is_first_login_ever = OpenIDBackend.is_first_login_ever()
    request.session.set_test_cookie()
    openid_url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
    # (Removed: OPENID_IDENTITY_URL_PREFIX was read into an unused local.)
    # Case of centralized server endpoint GET request
    if openid_url is not None:
        if request.method == 'GET':
            return render_to_response('openid-login.html', {
                'action': urlresolvers.reverse('openid-login'),
                'next': redirect_to,
                'first_login_ever': is_first_login_ever,
                'hide_field': True
            }, context_instance=RequestContext(request))
    return django_login_begin(request, template_name='openid-login.html', form_class=OpenIDLoginFormExt)
# Mark both views as reachable without an authenticated session -- this
# attribute is presumably checked by the surrounding app's login-required
# middleware (verify against desktop.middleware).
setattr(login_begin, 'login_notrequired', True)
setattr(login_complete, 'login_notrequired', True)
| {
"content_hash": "68f4a205d59bb1d8af02b2eb4c63d797",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 104,
"avg_line_length": 32.92857142857143,
"alnum_prop": 0.7368040491684743,
"repo_name": "todaychi/hue",
"id": "1e2ddd5f1c957bc9ea082fe3a5fd6aa945e7eae5",
"size": "2175",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/libopenid/src/libopenid/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2717013"
},
{
"name": "C++",
"bytes": "199945"
},
{
"name": "CSS",
"bytes": "691188"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23983570"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5432201"
},
{
"name": "Lex",
"bytes": "39802"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146585"
},
{
"name": "Mako",
"bytes": "3525679"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45877726"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46975"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "353353"
}
],
"symlink_target": ""
} |
# Number-guessing game, annotated with "@@" question markers for a grading
# harness. The "@@" lines are not Python: they delimit graded blanks and the
# file is not directly runnable as-is.
import random
# Inclusive (low, high) bounds for the secret number.
inclusive_range = (1, 100)
print("Guess my target number that is between %i and %i (inclusive).\n" % inclusive_range)
@@ begin question get_random_number
@@ points: 10
@@ time: 1 minute
target = random.randint(*inclusive_range)
@@ end question
# answer: the player's latest guess; i: number of guesses taken so far.
answer, i = None, 0
while answer != target:
    i += 1
    answer = input("Your guess(%i): " % i)
    try:
@@ begin question convert_to_int
@@ points: 10
@@ time: 1 minute
        answer = int(answer)
@@ end question
    except ValueError:
        # Non-numeric input: report it and ask again.
        print(" I don't understand your input of '%s' ?" % answer)
        continue
@@ begin question detect_number_out_of_range
@@ points: 10
@@ time: 1 minute
    if answer < inclusive_range[0] or answer > inclusive_range[1]:
@@ end question
        print(" Out of range!")
        continue
@@ begin question detect_correct_guess
@@ points: 10
@@ time: 1 minute
    if answer == target:
@@ end question
        print(" Ye-Haw!!")
        break
    if answer < target: print(" Too low.")
    if answer > target: print(" Too high.")
print("\nThanks for playing.")
| {
"content_hash": "c2ee01f127bab2ca7da8b553edb4028e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 90,
"avg_line_length": 24.930232558139537,
"alnum_prop": 0.6268656716417911,
"repo_name": "RaphaelArkadyMeyer/LiveCoding",
"id": "d421976df722174b4254cc89083b064a3435e579",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/guess_number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "251"
},
{
"name": "C",
"bytes": "128"
},
{
"name": "C++",
"bytes": "1785"
},
{
"name": "Python",
"bytes": "23096"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
import json
import sys
def q_to_fdict(q):
    """Flatten a question record into a feature dict for DictVectorizer.

    Emits one binary 'lat/<text>/<type>' feature per non-Wordnet LAT, the
    last selection-verb as a string-valued 'sv' feature, and a binary
    'sv_not_present' flag when the question has no selection verb.
    """
    features = {}
    for lat in q['LAT']:
        if lat['type'] != "WordnetLAT":
            features['lat/%s/%s' % (lat['text'], lat['type'])] = 1
    for sv in q['SV']:
        # Repeated assignment keeps only the final selection verb.
        features['sv'] = sv
    if not q['SV']:
        features['sv_not_present'] = 1
    return features
if __name__ == '__main__':
    # Train a multinomial logistic-regression question classifier from JSON
    # feature files + TSV label files, report accuracies, and dump the model
    # (weights, intercepts, vocabulary, labels) as JSON to stdout.
    parser = ArgumentParser(description='Training question classifier')
    parser.add_argument("train_data", help="training data set in json format with features")
    parser.add_argument("train_data_tsv", help="training data set in tsv format with labels")
    parser.add_argument("test_data", help="testing data set in json format with features")
    parser.add_argument("test_data_tsv", help="testing data set in tsv format with labels")
    args = parser.parse_args()
    # Vectorize training features; the fitted vocabulary is reused for test.
    with open(args.train_data, 'r') as f:
        fdict = [q_to_fdict(q) for q in json.load(f)]
    Xdict = DictVectorizer()
    trainX = Xdict.fit_transform(fdict)
    with open(args.test_data, 'r') as f:
        fdict = [q_to_fdict(q) for q in json.load(f)]
    testX = Xdict.transform(fdict)
    # Labels are the 4th tab-separated column of the TSV files.
    with open(args.train_data_tsv, 'r') as f:
        trainY = [line.split("\t")[3].replace("\n","") for line in f]
    with open(args.test_data_tsv, 'r') as f:
        testY = [line.split("\t")[3].replace("\n","") for line in f]
    cfier = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    cfier.fit(trainX, trainY)
    print ("// Accuracy on training set: " + str(cfier.score(trainX, trainY)))
    #Temporary solution: cross validation
    # NOTE(review): sklearn.cross_validation was removed in modern scikit-learn
    # (replaced by sklearn.model_selection); this pins an old sklearn version.
    res = cross_validation.cross_val_score(cfier, trainX, trainY, cv=10)
    print ("// Average accuracy over 10-fold cross valiadtion: " + str(sum(res) / float(len(res))))
    print ("// Accuracy on test data set: " + str(cfier.score(testX, testY)))
    print ("// Logistic Regression parameters: " + str(cfier.get_params()))
    # Serialize the trained model as plain JSON on stdout.
    data = {}
    data["weight_vector"] = cfier.coef_.tolist()
    data["intercept"] = cfier.intercept_.tolist()
    data["feature_indices"] = Xdict.vocabulary_
    lab = set()
    [lab.add(e) for e in trainY]
    data["labels"] = sorted(lab)
    json.dump(data, sys.stdout)
| {
"content_hash": "be43ba880d04cf112d5fde5b1c35ff20",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 96,
"avg_line_length": 39.12068965517241,
"alnum_prop": 0.6632877919788454,
"repo_name": "vineetk1/yodaqa",
"id": "e2c35f47a1e40c9165b018b2ec979d75851ed391",
"size": "3448",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/ml/qclass/train_question_classifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7193"
},
{
"name": "Groovy",
"bytes": "887"
},
{
"name": "HTML",
"bytes": "4187"
},
{
"name": "Java",
"bytes": "820348"
},
{
"name": "JavaScript",
"bytes": "27315"
},
{
"name": "Jupyter Notebook",
"bytes": "279065"
},
{
"name": "Perl",
"bytes": "4472"
},
{
"name": "Python",
"bytes": "86314"
},
{
"name": "Shell",
"bytes": "20330"
},
{
"name": "XSLT",
"bytes": "24923"
}
],
"symlink_target": ""
} |
"""
Django settings for helloworld project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production use.
SECRET_KEY = 'h#!izko_ejn@&0g@a#yu$tpuh-a=v$vvcfq5p*y#@2rzfcojn7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Dotted paths include the 'test.helloworld' prefix -- this project lives
# inside a test harness, not at the repository root.
ROOT_URLCONF = 'test.helloworld.helloworld.urls'
WSGI_APPLICATION = 'test.helloworld.helloworld.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "921933b995e25257e536da71500346fa",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 71,
"avg_line_length": 25.433734939759034,
"alnum_prop": 0.7280909521553766,
"repo_name": "JianMingZhuo/WSGIServer",
"id": "e4780735f595786ba47664fd791fc96487c5e32a",
"size": "2737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/helloworld/helloworld/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43226"
}
],
"symlink_target": ""
} |
"""fix altitude trigger
Revision ID: 9a9f4971edcd
Revises: 7471f51011c8
Create Date: 2021-11-30 14:48:23.458154
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9a9f4971edcd'        # this migration
down_revision = '7471f51011c8'   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Replace ref_geo.fct_trg_calculate_alt_minmax with the fixed version.

    On UPDATE, the new function only recomputes altitudes when the geometry
    changed AND the altitudes were not already recomputed by the caller.
    """
    # NOTE(review): the UPDATE guard compares new.altitude_min against
    # old.altitude_MAX ("new.altitude_min = old.altitude_max"); this looks
    # like it was meant to be old.altitude_min -- confirm against the intent
    # of this fix before relying on the condition.
    op.execute(
        """
        CREATE OR REPLACE FUNCTION ref_geo.fct_trg_calculate_alt_minmax()
        RETURNS trigger
        LANGUAGE plpgsql
        AS $function$
        DECLARE
        the4326geomcol text := quote_ident(TG_ARGV[0]);
        thelocalsrid int;
        BEGIN
        -- si c'est un insert et que l'altitude min ou max est null -> on calcule
        IF (TG_OP = 'INSERT' and (new.altitude_min IS NULL or new.altitude_max IS NULL)) THEN
        --récupérer le srid local
        SELECT INTO thelocalsrid parameter_value::int FROM gn_commons.t_parameters WHERE parameter_name = 'local_srid';
        --Calcul de l'altitude
        SELECT (ref_geo.fct_get_altitude_intersection(st_transform(hstore(NEW)-> the4326geomcol,thelocalsrid))).* INTO NEW.altitude_min, NEW.altitude_max;
        -- si c'est un update et que la geom a changé
        -- on vérifie que les altitude ne sont pas null
        -- OU si les altitudes ont changé, si oui = elles ont déjà été calculés - on ne relance pas le calcul
        ELSIF (
        TG_OP = 'UPDATE'
        AND NOT public.ST_EQUALS(hstore(OLD)-> the4326geomcol, hstore(NEW)-> the4326geomcol)
        and (new.altitude_min = old.altitude_max or new.altitude_max = old.altitude_max)
        and not(new.altitude_min is null or new.altitude_max is null)
        ) then
        --IF (new.altitude_min is null or new.altitude_max is null) OR (NOT OLD.altitude_min = NEW.altitude_min or NOT OLD.altitude_max = OLD.altitude_max) THEN
        --récupérer le srid local
        SELECT INTO thelocalsrid parameter_value::int FROM gn_commons.t_parameters WHERE parameter_name = 'local_srid';
        --Calcul de l'altitude
        SELECT (ref_geo.fct_get_altitude_intersection(st_transform(hstore(NEW)-> the4326geomcol,thelocalsrid))).* INTO NEW.altitude_min, NEW.altitude_max;
        --end IF;
        --else
        END IF;
        RETURN NEW;
        END;
        $function$
        ;
        """
    )
def downgrade():
    """Restore the previous version of ref_geo.fct_trg_calculate_alt_minmax."""
    op.execute(
        """
        CREATE OR REPLACE FUNCTION ref_geo.fct_trg_calculate_alt_minmax()
        RETURNS trigger
        LANGUAGE plpgsql
        AS $function$
        DECLARE
        the4326geomcol text := quote_ident(TG_ARGV[0]);
        thelocalsrid int;
        BEGIN
        -- si c'est un insert et que l'altitude min ou max est null -> on calcule
        IF (TG_OP = 'INSERT' and (new.altitude_min IS NULL or new.altitude_max IS NULL)) THEN
        --récupérer le srid local
        SELECT INTO thelocalsrid parameter_value::int FROM gn_commons.t_parameters WHERE parameter_name = 'local_srid';
        --Calcul de l'altitude
        SELECT (ref_geo.fct_get_altitude_intersection(st_transform(hstore(NEW)-> the4326geomcol,thelocalsrid))).* INTO NEW.altitude_min, NEW.altitude_max;
        -- si c'est un update et que la geom a changé
        ELSIF (TG_OP = 'UPDATE' AND NOT public.ST_EQUALS(hstore(OLD)-> the4326geomcol, hstore(NEW)-> the4326geomcol)) then
        -- on vérifie que les altitude ne sont pas null
        -- OU si les altitudes ont changé, si oui = elles ont déjà été calculés - on ne relance pas le calcul
        IF (new.altitude_min is null or new.altitude_max is null) OR (NOT OLD.altitude_min = NEW.altitude_min or NOT OLD.altitude_max = OLD.altitude_max) THEN
        --récupérer le srid local
        SELECT INTO thelocalsrid parameter_value::int FROM gn_commons.t_parameters WHERE parameter_name = 'local_srid';
        --Calcul de l'altitude
        SELECT (ref_geo.fct_get_altitude_intersection(st_transform(hstore(NEW)-> the4326geomcol,thelocalsrid))).* INTO NEW.altitude_min, NEW.altitude_max;
        end IF;
        else
        END IF;
        RETURN NEW;
        END;
        $function$
        ;
        """
    )
| {
"content_hash": "20f14cec53c87a8debed3f02c409339e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 164,
"avg_line_length": 44.4639175257732,
"alnum_prop": 0.6121029445861349,
"repo_name": "PnEcrins/GeoNature",
"id": "1e2d4b0cd2aaaca541e26f17c4a4534761f43e81",
"size": "4337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/geonature/migrations/versions/9a9f4971edcd_fix_altitude_trigger.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1931"
},
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "CSS",
"bytes": "763718"
},
{
"name": "HTML",
"bytes": "651"
},
{
"name": "JavaScript",
"bytes": "16182773"
},
{
"name": "PHP",
"bytes": "4058658"
},
{
"name": "PLpgSQL",
"bytes": "893372"
},
{
"name": "Shell",
"bytes": "33147"
}
],
"symlink_target": ""
} |
import os.path
import os
import logging
import hashlib
import json
import datetime
from pytz import timezone
import iso8601
import threading
import queue
import xlsxwriter
import sys
# Module-level logger; handlers/levels are configured by the application.
log = logging.getLogger(__name__)
# Local timezone, overridable via the TZ env var; presumably consumed by the
# datetime_now() helper defined elsewhere in this module -- verify.
time_zone = timezone(os.getenv('TZ', "America/New_York"))
class Inventory:
    """Fixity inventory of one directory.

    Records the directory's immediate subdirectory names and a mapping of
    file name -> SHA-256 hex digest, and persists itself as JSON under a
    location derived from a hash of the directory path.
    """
    def __init__(self, path, dirs=None, files=None):
        # path: directory path relative to the inventory base.
        self.path = path
        self.dirs = set(dirs or [])
        # files maps immediate-child file name -> sha256 hex digest.
        self.files = files or {}
        self.inventory_filepath = self._generate_inventory_filepath(self.path)
        # NOTE(review): timestamp is always "now"; Inventory.read() below does
        # not restore the timestamp stored in the JSON file.
        self.timestamp = datetime_now()
    @staticmethod
    def perform_inventory(fs_path, base_fs_path, fixity_threads=1):
        """Inventory the immediate children of fs_path.

        Subdirectory names are recorded as-is; file digests are computed by
        up to fixity_threads FixityThread workers fed through a queue.
        Returns the new Inventory (its path is fs_path relative to
        base_fs_path).
        """
        log.info('Inventorying %s', fs_path)
        inventory = Inventory(Inventory._remove_base_path(fs_path, base_fs_path))
        child_files = []
        for child in os.listdir(fs_path):
            child_path = os.path.join(fs_path, child)
            if os.path.isdir(child_path):
                inventory.dirs.add(child)
            elif os.path.isfile(child_path):
                # inventory.files[child] = Inventory._generate_fixity(child_path)
                child_files.append((child, child_path))
        if child_files:
            q = queue.Queue()
            threads = []
            # Never start more workers than there are files to hash.
            thread_count = min(len(child_files), fixity_threads)
            for i in range(thread_count):
                t = FixityThread(q, inventory.files)
                t.start()
                threads.append(t)
            for file in child_files:
                q.put(file)
            # block until all tasks are done
            q.join()
            # stop workers (one (None, None) sentinel per worker)
            for i in range(thread_count):
                q.put((None, None))
            for t in threads:
                t.join()
        return inventory
    @staticmethod
    def perform_recursive_inventory(fs_path, fs_base_path, fixity_threads=1):
        """Inventory fs_path and, recursively, every directory beneath it.

        Returns a flat list of Inventory objects, parent first.
        """
        inventories = [Inventory.perform_inventory(fs_path, fs_base_path, fixity_threads=fixity_threads)]
        for dir_name in inventories[0].dirs:
            # NOTE(review): fixity_threads is not forwarded here, so all
            # child directories are hashed single-threaded -- confirm intent.
            inventories.extend(Inventory.perform_recursive_inventory(os.path.join(fs_path, dir_name), fs_base_path))
        return inventories
    @staticmethod
    def _remove_base_path(path, base_path):
        # Express path relative to base_path.
        return os.path.relpath(path, base_path)
    @staticmethod
    def _generate_fixity(filepath):
        """Return the SHA-256 hex digest of the file, read in 64 KiB chunks."""
        sha256 = hashlib.sha256()
        with open(filepath, 'rb') as f:
            while True:
                data = f.read(65536)
                if not data:
                    break
                sha256.update(data)
        return sha256.hexdigest()
    @staticmethod
    def _generate_inventory_filepath(path):
        """Map a directory path to its inventory JSON location.

        The SHA-256 of the path is split into eight 8-character segments used
        as nested directory names, with <digest>.json as the leaf file name.
        """
        digest = hashlib.sha256(path.encode('utf-8')).hexdigest()
        return os.path.join(os.path.join(*[digest[i:i + 8] for i in range(0, 64, 8)]), '{}.json'.format(digest))
    def as_dict(self):
        """JSON-serializable representation (dirs sorted for stable output)."""
        return {
            'path': self.path,
            'dirs': sorted(list(self.dirs)),
            'files': self.files,
            'timestamp': self.timestamp.isoformat()
        }
    def write(self, base_inventory_path):
        """Write this inventory as JSON below base_inventory_path.

        Creates parent directories as needed; returns the file path written.
        """
        filepath = os.path.join(base_inventory_path, self.inventory_filepath)
        log.debug('Writing inventory for %s to %s', self.path, filepath)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, 'w') as f:
            json.dump(self.as_dict(), f, indent=2)
        return filepath
    def diff(self, that_inventory):
        """Compare two inventories of the same path.

        Returns a 5-tuple: (dirs missing from self, dirs missing from that,
        files missing from self, files missing from that,
        {file: (self fixity, that fixity)} for digests that disagree).
        """
        assert self.path == that_inventory.path
        directories_missing_from_this = that_inventory.dirs - self.dirs
        directories_missing_from_that = self.dirs - that_inventory.dirs
        files_missing_from_this = self._files_missing_from_that(that_inventory.files, self.files)
        files_missing_from_that = self._files_missing_from_that(self.files, that_inventory.files)
        file_fixity_mismatch = {}
        for file, fixity in self.files.items():
            if file in that_inventory.files and fixity != that_inventory.files[file]:
                file_fixity_mismatch[file] = (fixity, that_inventory.files[file])
        return (directories_missing_from_this, directories_missing_from_that,
                files_missing_from_this, files_missing_from_that, file_fixity_mismatch)
    @staticmethod
    def _files_missing_from_that(this_files, that_files):
        # {name: fixity} for files present in this_files but not in that_files.
        missing_files = {}
        for file in set(this_files.keys()) - set(that_files.keys()):
            missing_files[file] = this_files[file]
        return missing_files
    @staticmethod
    def read(path, base_inventory_path):
        """Load the stored Inventory for a directory path from its JSON file."""
        inventory_filepath = os.path.join(base_inventory_path, Inventory._generate_inventory_filepath(path))
        log.debug('Reading inventory for %s from %s', path, inventory_filepath)
        with open(inventory_filepath) as f:
            inventory_json = json.load(f)
        # NOTE(review): the stored 'timestamp' field is discarded here; the
        # returned object gets a fresh timestamp from __init__.
        return Inventory(inventory_json['path'], inventory_json['dirs'], inventory_json['files'])
    def update(self, directories_missing_from_this, directories_missing_from_that, files_missing_from_this,
               files_missing_from_that, file_fixity_mismatch, timestamp=None):
        """Apply a diff (as produced by diff()) so this inventory matches the
        other side, then refresh the timestamp."""
        log.info('Updating inventory for %s', self.path)
        # Add directories missing from this
        self.dirs = self.dirs | directories_missing_from_this
        # Remove directories missing from that
        self.dirs = self.dirs - directories_missing_from_that
        # Add files missing from this
        self.files.update(files_missing_from_this)
        # Remove files missing from that
        for file in files_missing_from_that.keys():
            del self.files[file]
        # Update file fixity mismatches
        for file, (_, fixity) in file_fixity_mismatch.items():
            assert self.files[file] != fixity
            self.files[file] = fixity
        self.timestamp = timestamp or datetime_now()
class FixityThread(threading.Thread):
    """Worker thread that computes SHA-256 digests for queued files.

    Pulls (filename, filepath) tuples from ``queue`` and records the hex
    digest in the shared ``files`` dict. A (None, None) tuple is the
    shutdown sentinel. An exception raised while hashing is captured and
    re-raised from join().
    """
    def __init__(self, queue, files, *args, **kwargs):
        # Saved sys.exc_info() tuple from run(), re-raised in join().
        self.exc = None
        self.queue = queue
        self.files = files
        super(FixityThread, self).__init__(*args, **kwargs)
    def run(self):
        while True:
            filename, filepath = self.queue.get()
            if filepath is None:
                # Sentinel: exit without task_done (producers q.join() the
                # real work items before sending sentinels).
                break
            try:
                sha256 = hashlib.sha256()
                with open(filepath, 'rb') as f:
                    # Hash in 64 KiB chunks to bound memory use.
                    for data in iter(lambda: f.read(65536), b''):
                        sha256.update(data)
                self.files[filename] = sha256.hexdigest()
            except Exception:
                # Record the failure but keep draining the queue so the
                # producer's q.join() still unblocks; join() re-raises.
                # (Was a bare except, which also swallowed KeyboardInterrupt.)
                self.exc = sys.exc_info()
            self.queue.task_done()
    def join(self, timeout=None):
        # Accept the standard Thread.join(timeout) signature so callers that
        # pass a timeout still work (the original override dropped it).
        threading.Thread.join(self, timeout)
        if self.exc:
            msg = "Thread '%s' threw an exception: %s" % (self.name, self.exc[1])
            new_exc = Exception(msg)
            raise new_exc.with_traceback(self.exc[2])
class InventoryDiff:
    """Outcome of comparing a filesystem inventory against a stored one.

    For a single directory path, holds the directories and files present on
    one side but absent from the other, plus files whose fixity digests
    disagree (mapped to an (expected, actual) pair).
    """
    def __init__(self, path, directories_missing_from_inventory, directories_missing_from_fs,
                 files_missing_from_inventory, files_missing_from_fs, file_fixity_mismatch, timestamp=None):
        self.path = path
        self.timestamp = timestamp or datetime_now()
        self.directories_missing_from_inventory = directories_missing_from_inventory
        self.directories_missing_from_fs = directories_missing_from_fs
        self.files_missing_from_inventory = files_missing_from_inventory
        self.files_missing_from_fs = files_missing_from_fs
        self.file_fixity_mismatch = file_fixity_mismatch
    def as_dict(self):
        """JSON-serializable form; directory sets become lists."""
        return dict(
            path=self.path,
            directories_missing_from_fs=list(self.directories_missing_from_fs),
            directories_missing_from_inventory=list(self.directories_missing_from_inventory),
            files_missing_from_fs=self.files_missing_from_fs,
            files_missing_from_inventory=self.files_missing_from_inventory,
            file_fixity_mismatch=self.file_fixity_mismatch,
            timestamp=self.timestamp.isoformat(),
        )
    def has_diffs(self):
        """Truthy when any difference category is non-empty.

        Mirrors the original or-chain exactly: returns the first non-empty
        category object (or the final empty one), not a bool.
        """
        for category in (self.directories_missing_from_fs,
                         self.directories_missing_from_inventory,
                         self.files_missing_from_fs,
                         self.files_missing_from_inventory,
                         self.file_fixity_mismatch):
            if category:
                return category
        return category
    @staticmethod
    def generate_inventory_diff(fs_inventory, inventory_inventory):
        """Diff the stored inventory against the filesystem one (same path)."""
        return InventoryDiff(fs_inventory.path, *inventory_inventory.diff(fs_inventory))
    @staticmethod
    def from_dict(inventory_dict):
        """Rebuild an InventoryDiff from its as_dict() representation."""
        dirs_from_inventory = set(inventory_dict['directories_missing_from_inventory'])
        dirs_from_fs = set(inventory_dict['directories_missing_from_fs'])
        return InventoryDiff(
            inventory_dict['path'],
            dirs_from_inventory,
            dirs_from_fs,
            inventory_dict['files_missing_from_inventory'],
            inventory_dict['files_missing_from_fs'],
            inventory_dict['file_fixity_mismatch'],
            parse_datetime(inventory_dict['timestamp']),
        )
class InventoryNote:
    """A timestamped, attributed free-text note attached to a report."""
    def __init__(self, text, user, timestamp=None):
        self.text = text
        self.user = user
        # Default to "now" only when no explicit timestamp is supplied.
        self.timestamp = timestamp or datetime_now()
    def as_dict(self):
        """Serialize to plain types for JSON output."""
        return dict(text=self.text, user=self.user,
                    timestamp=self.timestamp.isoformat())
    @staticmethod
    def from_dict(note_dict):
        """Inverse of as_dict(); parses the ISO-format timestamp string."""
        return InventoryNote(note_dict['text'], note_dict['user'],
                             parse_datetime(note_dict['timestamp']))
class InventoryReport:
    """A report of inventory verification results.

    Collects the InventoryDiff objects produced by a verification run rooted
    at ``base_path``, together with user notes and an optional "applied"
    timestamp. Reports are stored under ``<year>/<month>/<day>/`` directories
    derived from the report timestamp, and can be serialized to JSON
    (write/read round-trip) and to an Excel workbook (write_excel).
    """
    def __init__(self, base_path, inventory_diffs=None, timestamp=None, applied_timestamp=None, notes=None):
        self.base_path = base_path
        self.timestamp = timestamp or datetime_now()
        self.inventory_diffs = inventory_diffs or []
        # Derive storage paths from the ISO timestamp, e.g.
        # 2020-01-02T03:04:05 -> 2020/01/02/2020-01-02T03-04-05.json
        timestamp = self.timestamp.isoformat()
        self.report_time_dir = os.path.join(timestamp[0:4], timestamp[5:7], timestamp[8:10])
        # '.' and ':' are not filesystem-safe on all platforms.
        self.report_filename = timestamp.replace(".", "-").replace(":", "-")
        self.report_filepath = os.path.join(self.report_time_dir, '{}.json'.format(self.report_filename))
        self.applied_timestamp = applied_timestamp
        self.notes = notes or []
    def applied(self):
        """Mark this report's diffs as applied at the current time."""
        self.applied_timestamp = datetime_now()
    def add_note(self, text, user, timestamp=None):
        """Attach an InventoryNote to the report."""
        self.notes.append(InventoryNote(text, user, timestamp=timestamp))
    def as_dict(self):
        """Return a JSON-serializable dict of the report, its diffs and notes."""
        return {
            'base_path': self.base_path,
            'timestamp': self.timestamp.isoformat(),
            'applied_timestamp': self.applied_timestamp.isoformat() if self.applied_timestamp else None,
            'inventory_diffs': [diff.as_dict() for diff in self.inventory_diffs],
            'notes': [note.as_dict() for note in self.notes],
        }
    def write(self, base_report_path):
        """Write the report as JSON under base_report_path; return the filepath."""
        filepath = os.path.join(base_report_path, self.report_filepath)
        log.debug('Writing JSON report for %s to %s', self.base_path, filepath)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, 'w') as f:
            json.dump(self.as_dict(), f, indent=2)
        return filepath
    def write_excel(self, report_path):
        """Write the report as an Excel workbook; return the filepath.

        Produces a summary worksheet plus one worksheet per diff category.
        """
        filepath = os.path.join(report_path, self.report_time_dir, 'inventory_report_{}.xlsx'.format(self.report_filename))
        log.debug('Writing Excel report for %s to %s', self.base_path, filepath)
        wb = xlsxwriter.Workbook(filepath)
        bold = wb.add_format({'bold': True})
        report_ws = wb.add_worksheet('Report')

        def category_worksheet(title, headers):
            # Create a worksheet whose first row is a bold header row.
            ws = wb.add_worksheet(title)
            for col, header in enumerate(headers):
                ws.write(0, col, header, bold)
            return ws

        directories_missing_from_fs_ws = category_worksheet(
            'Dirs missing from fs', ['Path', 'Directory'])
        directories_missing_from_fs_row = 1
        directories_missing_from_inventory_ws = category_worksheet(
            'Dirs missing from inventory', ['Path', 'Directory'])
        directories_missing_from_inventory_row = 1
        files_missing_from_fs_ws = category_worksheet(
            'Files missing from fs', ['Path', 'Directory', 'Fixity'])
        files_missing_from_fs_row = 1
        files_missing_from_inventory_ws = category_worksheet(
            'Files missing from inventory', ['Path', 'Directory', 'Fixity'])
        files_missing_from_inventory_row = 1
        file_fixity_mismatch_ws = category_worksheet(
            'File fixity mismatch',
            ['Path', 'Directory', 'Expected fixity', 'Actual fixity'])
        file_fixity_mismatch_row = 1
        for inventory_diff in self.inventory_diffs:
            for directory in inventory_diff.directories_missing_from_fs:
                directories_missing_from_fs_ws.write(directories_missing_from_fs_row, 0, inventory_diff.path)
                directories_missing_from_fs_ws.write(directories_missing_from_fs_row, 1, directory)
                directories_missing_from_fs_row += 1
            for directory in inventory_diff.directories_missing_from_inventory:
                directories_missing_from_inventory_ws.write(directories_missing_from_inventory_row, 0,
                                                            inventory_diff.path)
                directories_missing_from_inventory_ws.write(directories_missing_from_inventory_row, 1, directory)
                directories_missing_from_inventory_row += 1
            for file, fixity in inventory_diff.files_missing_from_fs.items():
                files_missing_from_fs_ws.write(files_missing_from_fs_row, 0, inventory_diff.path)
                files_missing_from_fs_ws.write(files_missing_from_fs_row, 1, file)
                files_missing_from_fs_ws.write(files_missing_from_fs_row, 2, fixity)
                files_missing_from_fs_row += 1
            for file, fixity in inventory_diff.files_missing_from_inventory.items():
                files_missing_from_inventory_ws.write(files_missing_from_inventory_row, 0, inventory_diff.path)
                files_missing_from_inventory_ws.write(files_missing_from_inventory_row, 1, file)
                files_missing_from_inventory_ws.write(files_missing_from_inventory_row, 2, fixity)
                files_missing_from_inventory_row += 1
            for file, fixities in inventory_diff.file_fixity_mismatch.items():
                # fixities is a (expected, actual) pair.
                file_fixity_mismatch_ws.write(file_fixity_mismatch_row, 0, inventory_diff.path)
                file_fixity_mismatch_ws.write(file_fixity_mismatch_row, 1, file)
                file_fixity_mismatch_ws.write(file_fixity_mismatch_row, 2, fixities[0])
                file_fixity_mismatch_ws.write(file_fixity_mismatch_row, 3, fixities[1])
                file_fixity_mismatch_row += 1
        report_ws.write(0, 0, 'Content base path:', bold)
        report_ws.write(0, 1, self.base_path)
        report_ws.write(1, 0, 'Report filepath:', bold)
        report_ws.write(1, 1, self.report_filepath)
        report_ws.write(2, 0, 'Timestamp:', bold)
        report_ws.write(2, 1, self.timestamp.isoformat())
        report_ws.write(3, 0, 'Applied timestamp:', bold)
        if self.applied_timestamp:
            report_ws.write(3, 1, self.applied_timestamp.isoformat())
        # Summary counts: each *_row counter is one past the last data row, so
        # subtracting the header row yields the number of entries.
        # BUG FIX: both directory counts used to be written to row 4, so the
        # 'Dirs missing from fs' count was silently overwritten. Each count
        # now occupies its own row (4-8) and the notes table moves down.
        summary = [
            ('Dirs missing from fs:', directories_missing_from_fs_row - 1),
            ('Dirs missing from inventory:', directories_missing_from_inventory_row - 1),
            ('Files missing from fs:', files_missing_from_fs_row - 1),
            ('Files missing from inventory:', files_missing_from_inventory_row - 1),
            ('Fixity mismatches:', file_fixity_mismatch_row - 1),
        ]
        for summary_row, (label, count) in enumerate(summary, start=4):
            report_ws.write(summary_row, 0, label, bold)
            report_ws.write(summary_row, 1, count)
        if self.notes:
            # One blank row between the summary and the notes table.
            report_ws.write(10, 0, 'Note text', bold)
            report_ws.write(10, 1, 'User', bold)
            report_ws.write(10, 2, 'Timestamp', bold)
            report_row = 11
            for note in self.notes:
                report_ws.write(report_row, 0, note.text)
                report_ws.write(report_row, 1, note.user)
                report_ws.write(report_row, 2, note.timestamp.isoformat())
                report_row += 1
        wb.close()
        return filepath
    @staticmethod
    def read(report_filepath):
        """Load a report previously written by write() from a JSON file."""
        with open(report_filepath) as f:
            report_json = json.load(f)
        inventory_report = InventoryReport(report_json['base_path'],
                                           timestamp=parse_datetime(report_json['timestamp']),
                                           applied_timestamp=parse_datetime(report_json['applied_timestamp'])
                                           if report_json['applied_timestamp'] else None)
        for inventory_diff_dict in report_json['inventory_diffs']:
            inventory_report.inventory_diffs.append(InventoryDiff.from_dict(inventory_diff_dict))
        for note_dict in report_json['notes']:
            inventory_report.notes.append(InventoryNote.from_dict(note_dict))
        return inventory_report
def datetime_now():
    """Return the current time as a timezone-aware datetime in the configured zone."""
    return datetime.datetime.now(tz=time_zone)
def parse_datetime(datetime_str):
    """Parse an ISO 8601 string into a timezone-aware datetime via iso8601."""
    return iso8601.parse_date(datetime_str)
if __name__ == '__main__':
    # This module is a library; direct execution is a user error.
    print('You want to run inventory_manager.py, not inventory.py.')
    sys.exit(1)
| {
"content_hash": "a946d777422004da3c970d59ea1371a7",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 123,
"avg_line_length": 43.22458628841608,
"alnum_prop": 0.6137606650623496,
"repo_name": "gwu-libraries/inventory",
"id": "c6c74162c401de0ca71b86c93f2832d416dbdc72",
"size": "18284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44967"
}
],
"symlink_target": ""
} |
from cStringIO import StringIO
import jsmin
import os.path
import re
import sys
def main(argv):
    """Inline ``importScript(...)`` statements in a JS file and optionally minify.

    argv layout: [prog, input_file, imports_dir, output_file, (no_minify)].
    Any truthy fifth element disables minification.
    Returns 1 on a usage error, None on success.
    """
    # Three arguments are required. The previous check ('< 3') allowed a
    # three-element argv through and then crashed with IndexError on argv[3].
    if len(argv) < 4:
        print('usage: %s input_file imports_dir output_file no_minify' % argv[0])
        return 1
    input_file_name = argv[1]
    imports_dir = argv[2]
    output_file_name = argv[3]
    no_minify = len(argv) > 4 and argv[4]
    with open(input_file_name, 'r') as input_file:
        input_script = input_file.read()
    def replace(match):
        # Substitute each importScript("x.js") call with that file's contents.
        import_file_name = match.group(1)
        full_path = os.path.join(imports_dir, import_file_name)
        if not os.access(full_path, os.F_OK):
            raise Exception('File %s referenced in %s not found on any source paths, '
                            'check source tree for consistency' %
                            (import_file_name, input_file_name))
        with open(full_path, 'r') as import_file:
            return import_file.read()
    output_script = re.sub(r'importScripts?\([\'"]([^\'"]+)[\'"]\)', replace, input_script)
    # Any importScript call left over was not matched above (e.g. a computed
    # file name), which indicates a broken source tree.
    if re.search(r'importScripts?\("', output_script):
        raise Exception('Unresolved "importScript" statements found in "%s". '
                        'Make sure you call "importScript" in module heads only.' %
                        (output_file_name))
    if os.path.exists(output_file_name):
        os.remove(output_file_name)
    if not no_minify:
        output_script = jsmin.jsmin(output_script)
    with open(output_file_name, 'w') as output_file:
        output_file.write(output_script)
    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(output_file_name)
        os.utime(output_dir_name, None)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main(sys.argv))
| {
"content_hash": "3ecb95dc68045a9e95032fdd45b4a295",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 91,
"avg_line_length": 33.280701754385966,
"alnum_prop": 0.5998945703742752,
"repo_name": "espadrine/opera",
"id": "b65c35019032b3ca8a269c4e77967823bf30f2bd",
"size": "3597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/src/third_party/WebKit/Source/devtools/scripts/inline_js_imports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from distutils.core import setup
# Package metadata for the Logentries logging handler distribution.
setup(
    name='Logentries',
    version='0.7',
    author='Mark Lacomber',
    author_email='marklacomber@gmail.com',
    packages=['logentries'],
    scripts=[],
    url='http://pypi.python.org/pypi/Logentries/',
    license='LICENSE.txt',
    description='Python Logger plugin to send logs to Logentries',
    # Use the README as the long description shown on PyPI.
    long_description=open('README.txt').read(),
    # certifi supplies CA certificates for TLS connections to Logentries.
    install_requires=[
        "certifi",
    ],
)
| {
"content_hash": "b8ccbd789ec6316d98d2db7b301a0279",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 26.058823529411764,
"alnum_prop": 0.6455981941309256,
"repo_name": "siniar1990/le_python",
"id": "09fe454f430ceb68015c8d210dad50b0cdf1a1ea",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9432"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | {
"content_hash": "9b1fc425282fb087c055f1220a85a1a1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 166,
"avg_line_length": 38,
"alnum_prop": 0.706766917293233,
"repo_name": "antoinecarme/pyaf",
"id": "4608492efa1b7c5ce0ad13a217bfd24ff249a0eb",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_7/ar_12/test_artificial_32_Quantization_Lag1Trend_7_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
test/integration
~~~~~~~~~~~~~~~~
This file defines integration-type tests for hyper. These are still not fully
hitting the network, so that's alright.
"""
import requests
import threading
import hyper
import hyper.http11.connection
import pytest
from hyper.compat import ssl
from hyper.contrib import HTTP20Adapter
from hyper.packages.hyperframe.frame import (
Frame, SettingsFrame, WindowUpdateFrame, DataFrame, HeadersFrame,
GoAwayFrame,
)
from hyper.packages.hpack.hpack import Encoder
from hyper.packages.hpack.huffman import HuffmanEncoder
from hyper.packages.hpack.huffman_constants import (
REQUEST_CODES, REQUEST_CODES_LENGTH
)
from hyper.http20.exceptions import ConnectionError
from server import SocketLevelTest
# Turn off certificate verification for the tests.
# NOTE(review): test-only configuration — certificate verification must never
# be disabled like this in production code.
if ssl is not None:
    hyper.tls._context = hyper.tls.init_context()
    hyper.tls._context.check_hostname = False
    hyper.tls._context.verify_mode = ssl.CERT_NONE
# Cover our bases because NPN doesn't yet work on all our test platforms.
hyper.http20.connection.H2_NPN_PROTOCOLS += ['', None]
def decode_frame(frame_data):
    """Parse one serialized HTTP/2 frame and return the frame object.

    Asserts that the buffer contains exactly one complete frame.
    """
    header, payload = frame_data[:9], frame_data[9:]
    frame, length = Frame.parse_frame_header(header)
    frame.parse_body(memoryview(payload[:length]))
    assert 9 + length == len(frame_data)
    return frame
def build_headers_frame(headers, encoder=None):
    """Encode *headers* into a HeadersFrame on stream 1.

    When no encoder is supplied a fresh one is created with the standard
    request Huffman tables.
    """
    frame = HeadersFrame(1)
    if encoder is None:
        encoder = Encoder()
        encoder.huffman_coder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
    frame.data = encoder.encode(headers)
    frame.flags.add('END_HEADERS')
    return frame
def receive_preamble(sock):
    """Consume the client's HTTP/2 preamble and complete the settings exchange."""
    # Receive the HTTP/2 'preamble'.
    preamble = sock.recv(65535)
    # Work around some bugs: if the first message received was only the PRI
    # string, aim to receive a settings frame as well.
    if len(preamble) <= len(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'):
        sock.recv(65535)
    sock.send(SettingsFrame(0).serialize())
    sock.recv(65535)
    return
class TestHyperIntegration(SocketLevelTest):
    """Socket-level integration tests for hyper's HTTP/2 connection.

    Each test spins up an in-process server (``socket_handler``) that speaks
    raw HTTP/2 frames over a socket, then drives the client API against it.
    threading.Event objects sequence the client and server threads.
    """
    # These are HTTP/2 tests.
    h2 = True
    def test_connection_string(self):
        """The client must open with the HTTP/2 connection preface."""
        self.set_up()
        # Confirm that we send the connection upgrade string and the initial
        # SettingsFrame.
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We should get two packets: one connection header string, one
            # SettingsFrame.
            first = sock.recv(65535)
            second = sock.recv(65535)
            data.append(first)
            data.append(second)
            # We need to send back a SettingsFrame.
            f = SettingsFrame(0)
            sock.send(f.serialize())
            send_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.connect()
        send_event.set()
        assert data[0] == b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'
        self.tear_down()
    def test_initial_settings(self):
        """The client's first frame must be a SETTINGS frame disabling push."""
        self.set_up()
        # Confirm that we send the connection upgrade string and the initial
        # SettingsFrame.
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We should get two packets: one connection header string, one
            # SettingsFrame.
            first = sock.recv(65535)
            second = sock.recv(65535)
            data.append(first)
            data.append(second)
            # We need to send back a SettingsFrame.
            f = SettingsFrame(0)
            sock.send(f.serialize())
            send_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.connect()
        send_event.set()
        # Get the second chunk of data and decode it into a frame.
        data = data[1]
        f = decode_frame(data)
        assert isinstance(f, SettingsFrame)
        assert f.stream_id == 0
        assert f.settings == {
            SettingsFrame.ENABLE_PUSH: 0,
        }
        self.tear_down()
    def test_stream_level_window_management(self):
        """Sends must respect the stream flow-control window and resume on WINDOW_UPDATE."""
        self.set_up()
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # Dispose of the first two packets.
            sock.recv(65535)
            sock.recv(65535)
            # Send a Settings frame that reduces the flow-control window to
            # 64 bytes.
            f = SettingsFrame(0)
            f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = 64
            sock.send(f.serialize())
            # Grab three frames, the settings ACK, the initial headers frame,
            # and the first data frame.
            for x in range(0, 3):
                data.append(sock.recv(65535))
            # Send a WindowUpdate giving more window room to the stream.
            f = WindowUpdateFrame(1)
            f.window_increment = 64
            sock.send(f.serialize())
            # Send one that gives more room to the connection.
            f = WindowUpdateFrame(0)
            f.window_increment = 64
            sock.send(f.serialize())
            # Receive the remaining frame.
            data.append(sock.recv(65535))
            send_event.set()
            # We're done.
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.putrequest('GET', '/')
        conn.endheaders()
        # Send the first data chunk. This is 32 bytes.
        sd = b'a' * 32
        conn.send(sd)
        # Send the second one. This should block until the WindowUpdate comes
        # in.
        sd = sd * 2
        conn.send(sd, final=True)
        assert send_event.wait(0.3)
        # Decode the frames.
        frames = [decode_frame(d) for d in data]
        # We care about the last two. The first should be a data frame
        # containing 32 bytes.
        assert (isinstance(frames[-2], DataFrame) and
                not isinstance(frames[-2], HeadersFrame))
        assert len(frames[-2].data) == 32
        # The second should be a data frame containing 64 bytes.
        assert isinstance(frames[-1], DataFrame)
        assert len(frames[-1].data) == 64
        self.tear_down()
    def test_connection_context_manager(self):
        """Exiting the with-block must close the underlying socket."""
        self.set_up()
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We should get two packets: one connection header string, one
            # SettingsFrame.
            first = sock.recv(65535)
            second = sock.recv(65535)
            data.append(first)
            data.append(second)
            # We need to send back a SettingsFrame.
            f = SettingsFrame(0)
            sock.send(f.serialize())
            send_event.wait()
            sock.recv(65535)
            sock.close()
        self._start_server(socket_handler)
        with self.get_connection() as conn:
            conn.connect()
        send_event.set()
        # Check that we closed the connection.
        # NOTE(review): 'is None' would be the idiomatic comparison here.
        assert conn._sock == None
        self.tear_down()
    def test_closed_responses_remove_their_streams_from_conn(self):
        """Closing a response must drop its stream from the connection's map."""
        self.set_up()
        recv_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We're going to get the two messages for the connection open, then
            # a headers frame.
            receive_preamble(sock)
            sock.recv(65535)
            # Now, send the headers for the response.
            f = build_headers_frame([(':status', '200')])
            f.stream_id = 1
            sock.send(f.serialize())
            # Wait for the message from the main thread.
            recv_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.request('GET', '/')
        resp = conn.get_response()
        # Close the response.
        resp.close()
        recv_event.set()
        assert not conn.streams
        self.tear_down()
    def test_receiving_responses_with_no_body(self):
        """A 204 with END_STREAM on the headers must read as an empty body."""
        self.set_up()
        recv_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We get two messages for the connection open and then a HEADERS
            # frame.
            receive_preamble(sock)
            sock.recv(65535)
            # Now, send the headers for the response. This response has no body.
            f = build_headers_frame([(':status', '204'), ('content-length', '0')])
            f.flags.add('END_STREAM')
            f.stream_id = 1
            sock.send(f.serialize())
            # Wait for the message from the main thread.
            recv_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.request('GET', '/')
        resp = conn.get_response()
        # Confirm the status code.
        assert resp.status == 204
        # Confirm that we can read this, but it has no body.
        assert resp.read() == b''
        assert resp._stream._in_window_manager.document_size == 0
        # Awesome, we're done now.
        recv_event.set()
        self.tear_down()
    def test_receiving_trailers(self):
        """Trailing headers must be exposed via resp.trailers, minus reserved names."""
        self.set_up()
        recv_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            e = Encoder()
            e.huffman_coder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
            # We get two messages for the connection open and then a HEADERS
            # frame.
            receive_preamble(sock)
            sock.recv(65535)
            # Now, send the headers for the response. This response has no body.
            f = build_headers_frame([(':status', '200'), ('content-length', '0')], e)
            f.stream_id = 1
            sock.send(f.serialize())
            # Also send a data frame.
            f = DataFrame(1)
            f.data = b'have some data'
            sock.send(f.serialize())
            # Now, send a headers frame again, containing trailing headers.
            f = build_headers_frame([('trailing', 'sure'), (':res', 'no')], e)
            f.flags.add('END_STREAM')
            f.stream_id = 1
            sock.send(f.serialize())
            # Wait for the message from the main thread.
            recv_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.request('GET', '/')
        resp = conn.get_response()
        # Confirm the status code.
        assert resp.status == 200
        # Confirm that we can read this, but it has no body.
        assert resp.read() == b'have some data'
        assert resp._stream._in_window_manager.document_size == 0
        # Confirm that we got the trailing headers, and that they don't contain
        # reserved headers.
        assert resp.trailers['trailing'] == [b'sure']
        assert resp.trailers.get(':res') is None
        assert len(resp.headers) == 1
        assert len(resp.trailers) == 1
        # Awesome, we're done now.
        recv_event.set()
        self.tear_down()
    def test_clean_shut_down(self):
        """A GOAWAY with error code 0 must close the connection without raising."""
        self.set_up()
        recv_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We should get two packets: one connection header string, one
            # SettingsFrame. Rather than respond to the packets, send a GOAWAY
            # frame with error code 0 indicating clean shutdown.
            first = sock.recv(65535)
            second = sock.recv(65535)
            # Now, send the shut down.
            f = GoAwayFrame(0)
            f.error_code = 0
            sock.send(f.serialize())
            # Wait for the message from the main thread.
            recv_event.wait()
            sock.close()
        self._start_server(socket_handler)
        conn = self.get_connection()
        conn.connect()
        # Confirm the connection is closed.
        assert conn._sock is None
        # Awesome, we're done now.
        recv_event.set()
        self.tear_down()
    def test_unexpected_shut_down(self):
        """A GOAWAY with a nonzero error code must raise ConnectionError."""
        self.set_up()
        recv_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # We should get two packets: one connection header string, one
            # SettingsFrame. Rather than respond to the packets, send a GOAWAY
            # frame with error code 0 indicating clean shutdown.
            first = sock.recv(65535)
            second = sock.recv(65535)
            # Now, send the shut down.
            f = GoAwayFrame(0)
            f.error_code = 1
            sock.send(f.serialize())
            # Wait for the message from the main thread.
            sock.close()
            recv_event.wait()
        self._start_server(socket_handler)
        conn = self.get_connection()
        with pytest.raises(ConnectionError):
            conn.connect()
        # Confirm the connection is closed.
        assert conn._sock is None
        # Awesome, we're done now.
        recv_event.set()
        self.tear_down()
    def test_insecure_connection(self):
        """A full request/response cycle must work over a plaintext connection."""
        self.set_up(secure=False)
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            receive_preamble(sock)
            data.append(sock.recv(65535))
            send_event.wait()
            h = HeadersFrame(1)
            h.data = self.get_encoder().encode(
                {':status': 200,
                 'Content-Type': 'not/real',
                 'Content-Length': 14,
                 'Server': 'socket-level-server'}
            )
            h.flags.add('END_HEADERS')
            sock.send(h.serialize())
            d = DataFrame(1)
            d.data = b'nsaislistening'
            d.flags.add('END_STREAM')
            sock.send(d.serialize())
            sock.close()
        self._start_server(socket_handler)
        c = self.get_connection()
        c.request('GET', '/')
        send_event.set()
        r = c.get_response()
        assert r.status == 200
        assert len(r.headers) == 3
        assert r.headers[b'server'] == [b'socket-level-server']
        assert r.headers[b'content-length'] == [b'14']
        assert r.headers[b'content-type'] == [b'not/real']
        assert r.read() == b'nsaislistening'
        self.tear_down()
class TestRequestsAdapter(SocketLevelTest):
    """Integration tests for the requests HTTP20Adapter over a raw socket server.

    The TLS wrap is monkeypatched to always negotiate 'h2' so the adapter
    speaks HTTP/2 regardless of the test platform's NPN/ALPN support.
    """
    # This uses HTTP/2.
    h2 = True
    def test_adapter_received_values(self, monkeypatch):
        """Status, headers and body from the server must surface on the requests Response."""
        self.set_up()
        # We need to patch the ssl_wrap_socket method to ensure that we
        # forcefully upgrade.
        old_wrap_socket = hyper.http11.connection.wrap_socket
        def wrap(*args):
            sock, _ = old_wrap_socket(*args)
            return sock, 'h2'
        monkeypatch.setattr(hyper.http11.connection, 'wrap_socket', wrap)
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # Do the handshake: conn header, settings, send settings, recv ack.
            receive_preamble(sock)
            # Now expect some data. One headers frame.
            data.append(sock.recv(65535))
            # Respond!
            h = HeadersFrame(1)
            h.data = self.get_encoder().encode({':status': 200, 'Content-Type': 'not/real', 'Content-Length': 20})
            h.flags.add('END_HEADERS')
            sock.send(h.serialize())
            d = DataFrame(1)
            d.data = b'1234567890' * 2
            d.flags.add('END_STREAM')
            sock.send(d.serialize())
            send_event.wait()
            sock.close()
        self._start_server(socket_handler)
        s = requests.Session()
        s.mount('https://%s' % self.host, HTTP20Adapter())
        r = s.get('https://%s:%s/some/path' % (self.host, self.port))
        # Assert about the received values.
        assert r.status_code == 200
        assert r.headers[b'Content-Type'] == b'not/real'
        assert r.content == b'1234567890' * 2
        send_event.set()
        self.tear_down()
    def test_adapter_sending_values(self, monkeypatch):
        """A POST body must arrive at the server as a DATA frame after the HEADERS frame."""
        self.set_up()
        # We need to patch the ssl_wrap_socket method to ensure that we
        # forcefully upgrade.
        old_wrap_socket = hyper.http11.connection.wrap_socket
        def wrap(*args):
            sock, _ = old_wrap_socket(*args)
            return sock, 'h2'
        monkeypatch.setattr(hyper.http11.connection, 'wrap_socket', wrap)
        data = []
        send_event = threading.Event()
        def socket_handler(listener):
            sock = listener.accept()[0]
            # Do the handshake: conn header, settings, send settings, recv ack.
            receive_preamble(sock)
            # Now expect some data. One headers frame and one data frame.
            data.append(sock.recv(65535))
            data.append(sock.recv(65535))
            # Respond!
            h = HeadersFrame(1)
            h.data = self.get_encoder().encode({':status': 200, 'Content-Type': 'not/real', 'Content-Length': 20})
            h.flags.add('END_HEADERS')
            sock.send(h.serialize())
            d = DataFrame(1)
            d.data = b'1234567890' * 2
            d.flags.add('END_STREAM')
            sock.send(d.serialize())
            send_event.set()
            sock.close()
        self._start_server(socket_handler)
        s = requests.Session()
        s.mount('https://%s' % self.host, HTTP20Adapter())
        r = s.post(
            'https://%s:%s/some/path' % (self.host, self.port),
            data='hi there',
        )
        # Assert about the sent values.
        assert r.status_code == 200
        f = decode_frame(data[0])
        assert isinstance(f, HeadersFrame)
        f = decode_frame(data[1])
        assert isinstance(f, DataFrame)
        assert f.data == b'hi there'
        self.tear_down()
| {
"content_hash": "c52d7d084702fa8d37bf7713f774c07d",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 114,
"avg_line_length": 29.447833065810595,
"alnum_prop": 0.5609942221737708,
"repo_name": "masaori335/hyper",
"id": "89886e813be37bb2e4fa4a6265ffc33700337d78",
"size": "18370",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "test/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Hy",
"bytes": "1249"
},
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "340769"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
} |
from copy import deepcopy
import json
# django imports
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
# lfs imports
import lfs.core.utils
import lfs.discounts.utils
import lfs.order.utils
import lfs.payment.settings
import lfs.payment.utils
import lfs.shipping.utils
import lfs.voucher.utils
from lfs.addresses.utils import AddressManagement
from lfs.addresses.settings import CHECKOUT_NOT_REQUIRED_ADDRESS
from lfs.cart import utils as cart_utils
from lfs.core.models import Country
from lfs.checkout.settings import CHECKOUT_TYPE_ANON, CHECKOUT_TYPE_AUTH, ONE_PAGE_CHECKOUT_FORM
from lfs.customer import utils as customer_utils
from lfs.customer.utils import create_unique_username
from lfs.customer.forms import CreditCardForm, CustomerAuthenticationForm
from lfs.customer.forms import BankAccountForm
from lfs.customer.forms import RegisterForm
from lfs.payment.models import PaymentMethod
from lfs.voucher.models import Voucher
from lfs.voucher.settings import MESSAGES
def login(request, template_name="lfs/checkout/login.html"):
    """Displays a form to login or register/login the user within the check out
    process.
    The form's post request goes to lfs.customer.views.login where all the logic
    happens - see there for more.
    """
    # If the user is already authenticate we don't want to show this view at all
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse("lfs_checkout"))
    shop = lfs.core.utils.get_default_shop(request)
    # If only anonymous checkout allowed we don't want to show this view at all.
    if shop.checkout_type == CHECKOUT_TYPE_ANON:
        return HttpResponseRedirect(reverse("lfs_checkout"))
    # Using Djangos default AuthenticationForm
    login_form = CustomerAuthenticationForm()
    register_form = RegisterForm()
    if request.POST.get("action") == "login":
        login_form = CustomerAuthenticationForm(data=request.POST)
        login_form.fields["username"].label = _(u"E-Mail")
        if login_form.is_valid():
            # Imported locally to avoid shadowing this view function's name.
            from django.contrib.auth import login
            login(request, login_form.get_user())
            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                msg=_(u"You have been logged in."))
    elif request.POST.get("action") == "register":
        register_form = RegisterForm(data=request.POST)
        if register_form.is_valid():
            email = register_form.data.get("email")
            password = register_form.data.get("password_1")
            # Create user
            user = User.objects.create_user(
                username=create_unique_username(email), email=email, password=password)
            # Notify
            # NOTE(review): lfs.core.signals is not imported at the top of
            # this file — presumably made available by another lfs.core
            # import; verify to rule out an AttributeError here.
            lfs.core.signals.customer_added.send(sender=user)
            # Log in user
            from django.contrib.auth import authenticate
            user = authenticate(username=email, password=password)
            from django.contrib.auth import login
            login(request, user)
            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                msg=_(u"You have been registered and logged in."))
    # GET request, or a POST whose form did not validate: (re-)render the page.
    return render_to_response(template_name, RequestContext(request, {
        "login_form": login_form,
        "register_form": register_form,
        "anonymous_checkout": shop.checkout_type != CHECKOUT_TYPE_AUTH,
    }))
def checkout_dispatcher(request):
    """Redirect the visitor to the appropriate checkout view.

    Empty carts get the empty-cart page; authenticated users (or shops that
    allow anonymous checkout) go straight to checkout; everyone else is sent
    to the checkout login page first.
    """
    shop = lfs.core.utils.get_default_shop(request)
    cart = cart_utils.get_cart(request)
    if cart is None or not cart.get_items():
        return empty_page_checkout(request)
    anonymous_allowed = shop.checkout_type == CHECKOUT_TYPE_ANON
    if request.user.is_authenticated() or anonymous_allowed:
        target = "lfs_checkout"
    else:
        target = "lfs_checkout_login"
    return HttpResponseRedirect(reverse(target))
def cart_inline(request, template_name="lfs/checkout/checkout_cart_inline.html"):
    """Displays the cart items of the checkout page.
    Factored out to be reusable for the starting request (which renders the
    whole checkout page and subsequent ajax requests which refresh the
    cart items.
    """
    cart = cart_utils.get_cart(request)
    # Shipping
    selected_shipping_method = lfs.shipping.utils.get_selected_shipping_method(request)
    shipping_costs = lfs.shipping.utils.get_shipping_costs(request, selected_shipping_method)
    # Payment
    selected_payment_method = lfs.payment.utils.get_selected_payment_method(request)
    payment_costs = lfs.payment.utils.get_payment_costs(request, selected_payment_method)
    # Cart costs: start from the gross cart total plus shipping and payment
    # surcharges, then subtract discounts and voucher value below.
    cart_price = 0
    cart_tax = 0
    if cart is not None:
        cart_price = cart.get_price_gross(request) + shipping_costs["price_gross"] + payment_costs["price"]
        cart_tax = cart.get_tax(request) + shipping_costs["tax"] + payment_costs["tax"]
    discounts = lfs.discounts.utils.get_valid_discounts(request)
    for discount in discounts:
        cart_price = cart_price - discount["price_gross"]
        cart_tax = cart_tax - discount["tax"]
    # Voucher
    voucher_number = ''
    display_voucher = False
    voucher_value = 0
    voucher_tax = 0
    # NOTE(review): MESSAGES[6] is used as the default voucher message;
    # presumably the generic "no voucher" text — verify in lfs.voucher.settings.
    voucher_message = MESSAGES[6]
    if cart is not None:
        try:
            voucher_number = lfs.voucher.utils.get_current_voucher_number(request)
            voucher = Voucher.objects.get(number=voucher_number)
        except Voucher.DoesNotExist:
            # No (valid) voucher number stored for this session.
            pass
        else:
            lfs.voucher.utils.set_current_voucher_number(request, voucher_number)
            is_voucher_effective, voucher_message = voucher.is_effective(request, cart)
            if is_voucher_effective:
                display_voucher = True
                voucher_value = voucher.get_price_gross(request, cart)
                cart_price = cart_price - voucher_value
                voucher_tax = voucher.get_tax(request, cart)
                cart_tax = cart_tax - voucher_tax
            else:
                display_voucher = False
                voucher_value = 0
                voucher_tax = 0
    # Discounts/vouchers may exceed the cart total; never show negative prices.
    if cart_price < 0:
        cart_price = 0
    if cart_tax < 0:
        cart_tax = 0
    cart_items = []
    if cart:
        for cart_item in cart.get_items():
            product = cart_item.product
            quantity = product.get_clean_quantity(cart_item.amount)
            cart_items.append({
                "obj": cart_item,
                "quantity": quantity,
                "product": product,
                "product_price_net": cart_item.get_price_net(request),
                "product_price_gross": cart_item.get_price_gross(request),
                "product_tax": cart_item.get_tax(request),
            })
    return render_to_string(template_name, RequestContext(request, {
        "cart": cart,
        "cart_items": cart_items,
        "cart_price": cart_price,
        "cart_tax": cart_tax,
        "display_voucher": display_voucher,
        "discounts": discounts,
        "voucher_value": voucher_value,
        "voucher_tax": voucher_tax,
        "shipping_costs": shipping_costs,
        "payment_price": payment_costs["price"],
        "selected_shipping_method": selected_shipping_method,
        "selected_payment_method": selected_payment_method,
        "voucher_number": voucher_number,
        "voucher_message": voucher_message,
    }))
def one_page_checkout(request, template_name="lfs/checkout/one_page_checkout.html"):
    """
    One page checkout form.

    GET renders the full checkout page (addresses, bank account, credit
    card, shipping, payment and cart inlines); POST validates every
    sub-form and, when all are valid, stores the customer's selections
    and hands control to the payment processor.
    """
    # The concrete checkout form class is configurable via settings.
    OnePageCheckoutForm = lfs.core.utils.import_symbol(ONE_PAGE_CHECKOUT_FORM)
    cart = lfs.cart.utils.get_cart(request)
    if cart is None:
        # Nothing to check out - back to the (empty) cart page.
        return HttpResponseRedirect(reverse('lfs_cart'))
    initial_address = {}
    shop = lfs.core.utils.get_default_shop(request)
    if request.user.is_anonymous():
        if shop.checkout_type == CHECKOUT_TYPE_AUTH:
            # This shop only allows authenticated checkout.
            return HttpResponseRedirect(reverse("lfs_checkout_login"))
    else:
        # Pre-fill the e-mail field for logged-in users.
        initial_address['email'] = request.user.email
    customer = lfs.customer.utils.get_or_create_customer(request)
    invoice_address = customer.selected_invoice_address
    shipping_address = customer.selected_shipping_address
    bank_account = customer.selected_bank_account
    credit_card = customer.selected_credit_card
    if request.method == "POST":
        checkout_form = OnePageCheckoutForm(data=request.POST)
        iam = AddressManagement(customer, invoice_address, "invoice", request.POST, initial=initial_address)
        sam = AddressManagement(customer, shipping_address, "shipping", request.POST, initial=initial_address)
        bank_account_form = BankAccountForm(instance=bank_account, data=request.POST)
        credit_card_form = CreditCardForm(instance=credit_card, data=request.POST)
        # Terms-and-conditions confirmation, if the shop requires it.
        if shop.confirm_toc and ("confirm_toc" not in request.POST):
            toc = False
            if checkout_form.errors is None:
                checkout_form._errors = {}
            checkout_form.errors["confirm_toc"] = _(u"Please confirm our terms and conditions")
        else:
            toc = True
        if checkout_form.is_valid() and bank_account_form.is_valid() and iam.is_valid() and sam.is_valid() and toc:
            if CHECKOUT_NOT_REQUIRED_ADDRESS == 'shipping':
                iam.save()
                if request.POST.get("no_shipping", "") == "":
                    # If the shipping address is given then save it.
                    sam.save()
                else:
                    # If the shipping address is not given, the invoice address is copied.
                    if customer.selected_invoice_address:
                        if customer.selected_shipping_address:
                            # it might be possible that shipping and invoice addresses are same object
                            if customer.selected_shipping_address.pk != customer.selected_invoice_address.pk:
                                customer.selected_shipping_address.delete()
                        shipping_address = deepcopy(customer.selected_invoice_address)
                        # Clearing id and pk makes Django INSERT a fresh row on save().
                        shipping_address.id = None
                        shipping_address.pk = None
                        shipping_address.save()
                        customer.selected_shipping_address = shipping_address
            else:
                sam.save()
                if request.POST.get("no_invoice", "") == "":
                    iam.save()
                else:
                    # Invoice address not given - copy it from the shipping address.
                    if customer.selected_shipping_address:
                        if customer.selected_invoice_address:
                            # it might be possible that shipping and invoice addresses are same object
                            if customer.selected_invoice_address.pk != customer.selected_shipping_address.pk:
                                customer.selected_invoice_address.delete()
                        invoice_address = deepcopy(customer.selected_shipping_address)
                        invoice_address.id = None
                        invoice_address.pk = None
                        invoice_address.save()
                        customer.selected_invoice_address = invoice_address
            customer.sync_selected_to_default_addresses()
            # Save payment method
            customer.selected_payment_method_id = request.POST.get("payment_method")
            # Save bank account
            if customer.selected_payment_method_id and \
               int(customer.selected_payment_method_id) == lfs.payment.settings.PM_BANK:
                customer.selected_bank_account = bank_account_form.save()
            # Save credit card
            if customer.selected_payment_method_id and \
               int(customer.selected_payment_method_id) == lfs.payment.settings.PM_CREDIT_CARD:
                customer.selected_credit_card = credit_card_form.save()
            customer.save()
            # process the payment method; on success we leave the view here.
            result = lfs.payment.utils.process_payment(request)
            if result["accepted"]:
                return HttpResponseRedirect(result.get("next_url", reverse("lfs_thank_you")))
            else:
                if "message" in result:
                    checkout_form._errors[result.get("message_location")] = result.get("message")
    else:
        # Plain GET: unbound forms pre-filled from the stored customer data.
        checkout_form = OnePageCheckoutForm()
        iam = AddressManagement(customer, invoice_address, "invoice", initial=initial_address)
        sam = AddressManagement(customer, shipping_address, "shipping", initial=initial_address)
        bank_account_form = BankAccountForm(instance=bank_account)
        credit_card_form = CreditCardForm(instance=credit_card)
    # Payment
    try:
        selected_payment_method_id = request.POST.get("payment_method")
        selected_payment_method = PaymentMethod.objects.get(pk=selected_payment_method_id)
    except PaymentMethod.DoesNotExist:
        selected_payment_method = lfs.payment.utils.get_selected_payment_method(request)
    valid_payment_methods = lfs.payment.utils.get_valid_payment_methods(request)
    display_bank_account = any([pm.type == lfs.payment.settings.PM_BANK for pm in valid_payment_methods])
    display_credit_card = any([pm.type == lfs.payment.settings.PM_CREDIT_CARD for pm in valid_payment_methods])
    return render_to_response(template_name, RequestContext(request, {
        "checkout_form": checkout_form,
        "bank_account_form": bank_account_form,
        "credit_card_form": credit_card_form,
        "invoice_address_inline": iam.render(request),
        "shipping_address_inline": sam.render(request),
        "shipping_inline": shipping_inline(request),
        "payment_inline": payment_inline(request, bank_account_form),
        "selected_payment_method": selected_payment_method,
        "display_bank_account": display_bank_account,
        "display_credit_card": display_credit_card,
        "voucher_number": lfs.voucher.utils.get_current_voucher_number(request),
        "cart_inline": cart_inline(request),
        "settings": settings,
    }))
def empty_page_checkout(request, template_name="lfs/checkout/empty_page_checkout.html"):
    """
    Renders the checkout page that is shown when there is nothing to
    check out, offering a link back to the shop.
    """
    context = RequestContext(request, {
        "shopping_url": reverse("lfs_shop_view"),
    })
    return render_to_response(template_name, context)
def thank_you(request, template_name="lfs/checkout/thank_you_page.html"):
    """
    Displays a thank-you page to the customer after checkout, including
    a pay link when the session still carries an order.
    """
    order = request.session.get("order")
    if order:
        pay_link = order.get_pay_link(request)
    else:
        pay_link = None
    context = RequestContext(request, {
        "order": order,
        "pay_link": pay_link,
    })
    return render_to_response(template_name, context)
def payment_inline(request, form, template_name="lfs/checkout/payment_inline.html"):
    """
    Renders the selectable payment methods of the checkout page.

    Factored out so the initial full-page request and the subsequent
    ajax refreshes share one implementation. The passed form lets
    individual payment methods display their own sub-forms (e.g. the
    credit card form).
    """
    selected = lfs.payment.utils.get_selected_payment_method(request)
    available = lfs.payment.utils.get_valid_payment_methods(request)
    context = RequestContext(request, {
        "payment_methods": available,
        "selected_payment_method": selected,
        "form": form,
    })
    return render_to_string(template_name, context)
def shipping_inline(request, template_name="lfs/checkout/shipping_inline.html"):
    """
    Renders the selectable shipping methods of the checkout page.

    Factored out so the initial full-page request and the subsequent
    ajax refreshes share one implementation.
    """
    selected = lfs.shipping.utils.get_selected_shipping_method(request)
    available = lfs.shipping.utils.get_valid_shipping_methods(request)
    context = RequestContext(request, {
        "shipping_methods": available,
        "selected_shipping_method": selected,
    })
    return render_to_string(template_name, context)
def check_voucher(request):
    """
    Ajax view: re-applies the currently entered voucher number and
    returns the refreshed cart inline as JSON.
    """
    number = lfs.voucher.utils.get_current_voucher_number(request)
    lfs.voucher.utils.set_current_voucher_number(request, number)
    payload = {
        "html": (("#cart-inline", cart_inline(request)),)
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
def changed_checkout(request):
    """
    Ajax view called whenever checkout selections change; it persists
    the customer's choices and re-renders the dependent inlines.
    """
    form_class = lfs.core.utils.import_symbol(ONE_PAGE_CHECKOUT_FORM)
    form = form_class()
    customer = customer_utils.get_or_create_customer(request)
    _save_customer(request, customer)
    _save_country(request, customer)
    payload = {
        "shipping": shipping_inline(request),
        "payment": payment_inline(request, form),
        "cart": cart_inline(request),
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
def changed_invoice_country(request):
    """
    Refreshes the invoice address form after the invoice country has
    been changed. The new country is only persisted when both a
    selected invoice address and a country code are present.
    """
    customer = lfs.customer.utils.get_or_create_customer(request)
    address = customer.selected_invoice_address
    country_iso = request.POST.get("invoice-country")
    if address and country_iso:
        address.country = Country.objects.get(code=country_iso.lower())
        address.save()
        customer.sync_selected_to_default_invoice_address()
    manager = AddressManagement(customer, address, "invoice")
    payload = {
        "invoice_address": manager.render(request, country_iso),
    }
    return HttpResponse(json.dumps(payload), content_type='application/json')
def changed_shipping_country(request):
    """
    Refreshes the shipping address form, after the shipping country has been
    changed.

    The country is only stored when both a selected shipping address and a
    country code are present in the request; the form is re-rendered either
    way.
    """
    customer = lfs.customer.utils.get_or_create_customer(request)
    address = customer.selected_shipping_address
    country_iso = request.POST.get("shipping-country")
    # BUGFIX: also guard against a missing POST value. The invoice
    # counterpart already checks `address and country_iso`; without it,
    # country_iso.lower() raises AttributeError on None.
    if address and country_iso:
        address.country = Country.objects.get(code=country_iso.lower())
        address.save()
        customer.sync_selected_to_default_shipping_address()
    am = AddressManagement(customer, address, "shipping")
    result = json.dumps({
        "shipping_address": am.render(request, country_iso),
    })
    return HttpResponse(result, content_type='application/json')
def _save_country(request, customer):
    """
    Persists the country taken from the request on the customer (and on
    the address that mirrors the other one), then re-validates the
    selected shipping and payment methods against the new country.
    """
    # Update country for address that is marked as 'same as invoice' or 'same as shipping'
    if CHECKOUT_NOT_REQUIRED_ADDRESS == 'shipping':
        country_iso = request.POST.get("shipping-country", None)
        if request.POST.get("no_shipping") == "on":
            # Shipping mirrors the invoice address - use its country instead.
            country_iso = request.POST.get("invoice-country", None)
        if country_iso is not None:
            country = Country.objects.get(code=country_iso.lower())
            if customer.selected_shipping_address:
                customer.selected_shipping_address.country = country
                customer.selected_shipping_address.save()
            customer.selected_country = country
            customer.save()
            customer.sync_selected_to_default_shipping_address()
            # A country change can invalidate the current shipping/payment
            # method, so fall back to valid ones and persist again.
            lfs.shipping.utils.update_to_valid_shipping_method(request, customer)
            lfs.payment.utils.update_to_valid_payment_method(request, customer)
            customer.save()
    else:
        # update invoice address if 'same as shipping' address option is set and shipping address was changed
        if request.POST.get("no_invoice") == "on":
            country_iso = request.POST.get("shipping-country", None)
            if country_iso is not None:
                country = Country.objects.get(code=country_iso.lower())
                if customer.selected_invoice_address:
                    customer.selected_invoice_address.country = country
                    customer.selected_invoice_address.save()
                    customer.sync_selected_to_default_invoice_address()
def _save_customer(request, customer):
    """
    Stores the shipping and payment method chosen in the request on the
    customer, then corrects both selections if they are no longer valid.
    """
    customer.selected_shipping_method_id = request.POST.get("shipping-method")
    customer.selected_payment_method_id = request.POST.get("payment_method")
    customer.save()
    # The chosen methods may have become invalid (e.g. after a country
    # change) - fall back to valid ones and persist once more.
    lfs.shipping.utils.update_to_valid_shipping_method(request, customer)
    lfs.payment.utils.update_to_valid_payment_method(request, customer)
    customer.save()
| {
"content_hash": "c0199f8358784635ed41587abe559b00",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 115,
"avg_line_length": 40.560077519379846,
"alnum_prop": 0.655788618663099,
"repo_name": "leadbrick/django-lfs",
"id": "62cbef3ebea9199fbaf418e48d687380859fd21f",
"size": "20946",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lfs/checkout/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96502"
},
{
"name": "HTML",
"bytes": "615650"
},
{
"name": "JavaScript",
"bytes": "591493"
},
{
"name": "Python",
"bytes": "1384866"
}
],
"symlink_target": ""
} |
"""
Building and world design commands
"""
from django.conf import settings
from src.objects.models import ObjectDB, ObjAttribute
from src.players.models import PlayerAttribute
from src.utils import create, utils, debug
from src.utils.ansi import raw
from src.commands.default.muxcommand import MuxCommand
from src.commands.cmdhandler import get_and_merge_cmdsets
# limit symbol import for API
__all__ = ("ObjManipCommand", "CmdSetObjAlias", "CmdCopy",
"CmdCpAttr", "CmdMvAttr", "CmdCreate", "CmdDebug",
"CmdDesc", "CmdDestroy", "CmdDig", "CmdTunnel", "CmdLink",
"CmdUnLink", "CmdSetHome", "CmdListCmdSets", "CmdName",
"CmdOpen", "CmdSetAttribute", "CmdTypeclass", "CmdWipe",
"CmdLock", "CmdExamine", "CmdFind", "CmdTeleport",
"CmdScript")
try:
    # used by @set: safely evaluate Python literals typed by the user
    # without the dangers of eval().
    from ast import literal_eval as _LITERAL_EVAL
except ImportError:
    # literal_eval is not available before Python 2.6; callers must
    # handle the None fallback themselves.
    _LITERAL_EVAL = None

# used by @find: typeclass path that identifies Character objects
CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
class ObjManipCommand(MuxCommand):
    """
    Parent class for the object-defining build commands.

    Each object definition consists of a mandatory name, an optional
    ;-separated alias list and an optional trailing option (such as a
    typeclass or a location) after a colon. A comma ',' separates
    different objects:

        name1;alias;alias;alias:option, name2;alias;alias ...

    Spaces between all components are stripped.

    Attribute manipulation uses the simpler combinations

        objname/attr/attr/attr, objname/attr, ...

    OBS - this is just a parent; it is not intended to actually be
    included in a commandset on its own!
    """

    @staticmethod
    def _split_definition(definition):
        """Split one 'name;alias;..:option/attr/..' string into parts."""
        option = None
        aliases = []
        attrs = []
        if ':' in definition:
            definition, option = [part.strip() for part in definition.rsplit(':', 1)]
        if ';' in definition:
            definition, aliastext = [part.strip() for part in definition.split(';', 1)]
            aliases = [alias.strip() for alias in aliastext.split(';') if alias.strip()]
        if '/' in definition:
            definition, attrtext = [part.strip() for part in definition.split('/', 1)]
            attrs = [part.strip().lower() for part in attrtext.split('/') if part.strip()]
        return definition, option, aliases, attrs

    def parse(self):
        """
        Expand the default MuxCommand parsing with per-object
        definitions as described in the class docstring.
        """
        # get all the normal parsing done (switches etc)
        super(ObjManipCommand, self).parse()
        lhs_defs, rhs_defs = [], []
        lhs_attrs, rhs_attrs = [], []
        # lhslist/rhslist were already split on ',' by MuxCommand.parse.
        for arglist, defstore, attrstore in (
                (self.lhslist, lhs_defs, lhs_attrs),
                (self.rhslist, rhs_defs, rhs_attrs)):
            for definition in arglist:
                name, option, aliases, attrs = self._split_definition(definition)
                defstore.append({"name": name, "option": option, "aliases": aliases})
                attrstore.append({"name": name, "attrs": attrs})
        # store for future access
        self.lhs_objs = lhs_defs
        self.rhs_objs = rhs_defs
        self.lhs_objattr = lhs_attrs
        self.rhs_objattr = rhs_attrs
class CmdSetObjAlias(MuxCommand):
    """
    Adding permanent aliases

    Usage:
      @alias <obj> [= [alias[,alias,alias,...]]]

    Assigns aliases to an object so it can be referenced by more
    than one name. Assign empty to remove all aliases from object.

    Observe that this is not the same thing as aliases
    created with the 'alias' command! Aliases set with @alias are
    changing the object in question, making those aliases usable
    by everyone.
    """
    key = "@alias"
    aliases = "@setobjalias"
    locks = "cmd:perm(setobjalias) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Set the aliases."
        caller = self.caller
        if not self.lhs:
            string = "Usage: @alias <obj> [= [alias[,alias ...]]]"
            self.caller.msg(string)
            return
        objname = self.lhs
        # Find the object to receive aliases
        obj = caller.search(objname)
        if not obj:
            # caller.search has already reported the failure to the caller
            return
        # BUGFIX/idiom: compare to the None singleton with 'is', not '=='.
        if self.rhs is None:
            # no '=' was given, so we just list aliases on object.
            aliases = obj.aliases
            if aliases:
                caller.msg("Aliases for '%s': %s" % (obj.key, ", ".join(aliases)))
            else:
                caller.msg("No aliases exist for '%s'." % obj.key)
            return
        if not obj.access(caller, 'edit'):
            caller.msg("You don't have permission to do that.")
            return
        if not self.rhs:
            # we have given an empty '=', so delete all aliases
            old_aliases = obj.aliases
            if old_aliases:
                caller.msg("Cleared aliases from %s: %s" % (obj.key, ", ".join(old_aliases)))
                del obj.dbobj.aliases
            else:
                caller.msg("No aliases to clear.")
            return
        # merge the old and new aliases (if any)
        old_aliases = obj.aliases
        new_aliases = [alias.strip().lower() for alias in self.rhs.split(',') if alias.strip()]
        # make the aliases only appear once
        old_aliases.extend(new_aliases)
        aliases = list(set(old_aliases))
        # save back to object; we treat this as a re-caching (relevant for
        # exits to re-build their exit commands with the correct aliases)
        obj.aliases = aliases
        caller.msg("Aliases for '%s' are now set to %s." % (obj.key, ", ".join(obj.aliases)))
class CmdCopy(ObjManipCommand):
    """
    @copy - copy objects

    Usage:
      @copy[/reset] <original obj> [= new_name][;alias;alias..][:new_location] [,new_name2 ...]

    switch:
      reset - make a 'clean' copy off the object, thus
              removing any changes that might have been made to the original
              since it was first created.

    Create one or more copies of an object. If you don't supply any targets, one exact copy
    of the original object will be created with the name *_copy.
    """
    key = "@copy"
    locks = "cmd:perm(copy) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Uses ObjManipCommand.parse()"
        caller = self.caller
        args = self.args
        if not args:
            caller.msg("Usage: @copy <obj> [=new_name[;alias;alias..]][:new_location] [, new_name2...]")
            return
        if not self.rhs:
            # this has no target '=', so an identical new object is created.
            from_obj_name = self.args
            from_obj = caller.search(from_obj_name)
            if not from_obj:
                return
            to_obj_name = "%s_copy" % from_obj_name
            to_obj_aliases = ["%s_copy" % alias for alias in from_obj.aliases]
            copiedobj = ObjectDB.objects.copy_object(from_obj, new_key=to_obj_name,
                                                     new_aliases=to_obj_aliases)
            if copiedobj:
                string = "Identical copy of %s, named '%s' was created." % (from_obj_name, to_obj_name)
            else:
                # BUGFIX: the original left the %s placeholder uninterpolated,
                # showing a literal '%s' to the user.
                string = "There was an error copying %s." % from_obj_name
        else:
            # we have specified '='. This might mean many object targets.
            from_obj_name = self.lhs_objs[0]['name']
            from_obj = caller.search(from_obj_name)
            if not from_obj:
                return
            for objdef in self.rhs_objs:
                # loop through all possible copy-to targets
                to_obj_name = objdef['name']
                to_obj_aliases = objdef['aliases']
                to_obj_location = objdef['option']
                if to_obj_location:
                    to_obj_location = caller.search(to_obj_location, global_search=True)
                    if not to_obj_location:
                        return
                copiedobj = ObjectDB.objects.copy_object(from_obj, new_key=to_obj_name,
                                                         new_location=to_obj_location, new_aliases=to_obj_aliases)
                if copiedobj:
                    string = "Copied %s to '%s' (aliases: %s)." % (from_obj_name, to_obj_name,
                                                                   to_obj_aliases)
                else:
                    string = "There was an error copying %s to '%s'." % (from_obj_name,
                                                                         to_obj_name)
        # we are done, echo to user
        caller.msg(string)
class CmdCpAttr(ObjManipCommand):
    """
    @cpattr - copy attributes

    Usage:
      @cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
      @cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]

    Switches:
      move - delete the attribute from the source object after copying.

    Example:
      @cpattr coolness = Anna/chillout, Anna/nicety, Tom/nicety
      ->
      copies the coolness attribute (defined on yourself), to attributes
      on Anna and Tom.

    Copy the attribute one object to one or more attributes on another object. If
    you don't supply a source object, yourself is used.
    """
    key = "@cpattr"
    locks = "cmd:perm(cpattr) or perm(Builders)"
    help_category = "Building"

    def func(self):
        """
        Do the copying.
        """
        caller = self.caller
        if not self.rhs:
            string = """Usage:
      @cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
      @cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
            caller.msg(string)
            return
        lhs_objattr = self.lhs_objattr
        to_objs = self.rhs_objattr
        from_obj_name = lhs_objattr[0]['name']
        from_obj_attrs = lhs_objattr[0]['attrs']
        if not from_obj_attrs:
            # this means the from_obj_name is actually an attribute name on self.
            from_obj_attrs = [from_obj_name]
            from_obj = self.caller
            from_obj_name = self.caller.name
        else:
            from_obj = caller.search(from_obj_name)
        if not from_obj or not to_objs:
            caller.msg("You have to supply both source object and target(s).")
            return
        if not from_obj.has_attribute(from_obj_attrs[0]):
            caller.msg("%s doesn't have an attribute %s." % (from_obj_name, from_obj_attrs[0]))
            return
        # NOTE(review): only the FIRST source attribute's value is read here,
        # yet set_attribute below writes it for every from_attr; if several
        # source attributes are listed, their individual values are not
        # copied - verify this is intended.
        srcvalue = from_obj.get_attribute(from_obj_attrs[0])
        # copy to all to_obj:ects
        if "move" in self.switches:
            string = "Moving "
        else:
            string = "Copying "
        string += "%s/%s (with value %s) ..." % (from_obj_name, from_obj_attrs[0], srcvalue)
        for to_obj in to_objs:
            to_obj_name = to_obj['name']
            to_obj_attrs = to_obj['attrs']
            to_obj = caller.search(to_obj_name)
            if not to_obj:
                string += "\nCould not find object '%s'" % to_obj_name
                continue
            for inum, from_attr in enumerate(from_obj_attrs):
                try:
                    to_attr = to_obj_attrs[inum]
                except IndexError:
                    # if there are too few attributes given
                    # on the to_obj, we copy the original name instead.
                    to_attr = from_attr
                to_obj.set_attribute(to_attr, srcvalue)
                if "move" in self.switches and not (from_obj == to_obj and from_attr == to_attr):
                    # /move: remove the source attribute unless it is
                    # literally the same object/attribute pair.
                    from_obj.del_attribute(from_attr)
                    string += "\nMoved %s.%s -> %s.%s." % (from_obj.name, from_attr,
                                                           to_obj_name, to_attr)
                else:
                    string += "\nCopied %s.%s -> %s.%s." % (from_obj.name, from_attr,
                                                            to_obj_name, to_attr)
        caller.msg(string)
class CmdMvAttr(ObjManipCommand):
    """
    @mvattr - move attributes

    Usage:
      @mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
      @mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]

    Switches:
      copy - Don't delete the original after moving.

    Move an attribute from one object to one or more attributes on another object. If
    you don't supply a source object, yourself is used.
    """
    key = "@mvattr"
    locks = "cmd:perm(mvattr) or perm(Builders)"
    help_category = "Building"

    def func(self):
        """
        Do the moving
        """
        if not self.rhs:
            self.caller.msg("""Usage:
      @mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
      @mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
      @mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]""")
            return
        # @mvattr is simply @cpattr with /move pre-set, unless /copy was
        # given, in which case the source attribute is kept.
        if "copy" in self.switches:
            command = "@cpattr %s"
        else:
            command = "@cpattr/move %s"
        self.caller.execute_cmd(command % self.args)
class CmdCreate(ObjManipCommand):
    """
    @create - create new objects

    Usage:
      @create[/drop] objname[;alias;alias...][:typeclass], objname...

    switch:
       drop - automatically drop the new object into your current location (this is not echoed)
              this also sets the new object's home to the current location rather than to you.

    Creates one or more new objects. If typeclass is given, the object
    is created as a child of this typeclass. The typeclass script is
    assumed to be located under game/gamesrc/types and any further
    directory structure is given in Python notation. So if you have a
    correct typeclass object defined in
    game/gamesrc/types/examples/red_button.py, you could create a new
    object of this type like this:

       @create button;red : examples.red_button.RedButton
    """
    key = "@create"
    locks = "cmd:perm(create) or perm(Builders)"
    help_category = "Building"

    def func(self):
        """
        Creates the object.
        """
        caller = self.caller
        if not self.args:
            string = "Usage: @create[/drop] <newname>[;alias;alias...] [:typeclass_path]"
            caller.msg(string)
            return
        # create the objects; lhs_objs was prepared by ObjManipCommand.parse
        for objdef in self.lhs_objs:
            string = ""
            name = objdef['name']
            aliases = objdef['aliases']
            typeclass = objdef['option']
            # create object (if not a valid typeclass, the default
            # object typeclass will automatically be used)
            lockstring = "control:id(%s);examine:perm(Builders);delete:id(%s) or perm(Wizards);get:all()" % (caller.id, caller.id)
            obj = create.create_object(typeclass, name, caller,
                                       home=caller, aliases=aliases, locks=lockstring, report_to=caller)
            if not obj:
                # creation failed; create_object reported to the caller already
                continue
            if aliases:
                string = "You create a new %s: %s (aliases: %s)."
                string = string % (obj.typeclass.typename, obj.name, ", ".join(aliases))
            else:
                string = "You create a new %s: %s."
                string = string % (obj.typeclass.typename, obj.name)
            # set a default desc
            if not obj.db.desc:
                obj.db.desc = "You see nothing special."
            if 'drop' in self.switches:
                if caller.location:
                    # /drop: re-home and move the object to the current room
                    obj.home = caller.location
                    obj.move_to(caller.location, quiet=True)
            if string:
                caller.msg(string)
class CmdDebug(MuxCommand):
    """
    Debug game entities

    Usage:
      @debug[/switch] <path to code>

    Switches:
      obj - debug an object
      script - debug a script

    Examples:
      @debug/script game.gamesrc.scripts.myscript.MyScript
      @debug/script myscript.MyScript
      @debug/obj examples.red_button.RedButton

    This command helps when debugging the codes of objects and scripts.
    It creates the given object and runs tests on its hooks.
    """
    key = "@debug"
    locks = "cmd:perm(debug) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Running the debug"
        caller = self.caller
        if not (self.args and self.switches):
            caller.msg("Usage: @debug[/obj][/script] <path>")
            return
        path = self.args
        switches = self.switches
        if "obj" in switches or "object" in switches:
            # instantiate the object and exercise its hooks and scripts
            caller.msg(debug.debug_object(path, caller))
            caller.msg(debug.debug_object_scripts(path, caller))
        elif "script" in switches:
            # only check the script for syntax problems
            caller.msg(debug.debug_syntax_script(path))
class CmdDesc(MuxCommand):
    """
    @desc - describe an object or room

    Usage:
      @desc [<obj> =] <description>

    Sets the "desc" attribute on an
    object. If an object is not given,
    describe the current room.
    """
    key = "@desc"
    aliases = "@describe"
    locks = "cmd:perm(desc) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Define command"
        caller = self.caller
        if not self.args:
            # BUGFIX: usage previously read '>description>' - a typo for
            # '<description>' (also fixed in the help text above).
            caller.msg("Usage: @desc [<obj> =] <description>")
            return
        if self.rhs:
            # We have an '=': the lhs names the object to describe.
            obj = caller.search(self.lhs)
            if not obj:
                return
            desc = self.rhs
        else:
            # No '=': describe the current room with the whole argument.
            obj = caller.location
            desc = self.args
        # storing the description
        obj.db.desc = desc
        caller.msg("The description was set on %s." % obj.key)
class CmdDestroy(MuxCommand):
    """
    @destroy - remove objects from the game

    Usage:
       @destroy[/switches] [obj, obj2, obj3, [dbref-dbref], ...]

    switches:
       override - The @destroy command will usually avoid accidentally destroying
                  player objects. This switch overrides this safety.
    examples:
       @destroy house, roof, door, 44-78
       @destroy 5-10, flower, 45

    Destroys one or many objects. If dbrefs are used, a range to delete can be
    given, e.g. 4-10. Also the end points will be deleted.
    """
    key = "@destroy"
    aliases = ["@delete", "@del"]
    locks = "cmd:perm(destroy) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Implements the command."
        caller = self.caller
        if not self.args or not self.lhslist:
            caller.msg("Usage: @destroy[/switches] [obj, obj2, obj3, [dbref-dbref],...]")
            return ""

        def delobj(objname, byref=False):
            # helper function for deleting a single object; returns the
            # message fragment to report, or "" on a search failure.
            string = ""
            obj = caller.search(objname, global_dbref=byref)
            if not obj:
                self.caller.msg(" (Objects to destroy must either be local or specified with a unique dbref.)")
                return ""
            # refuse to delete the configured default character home
            if not "override" in self.switches and obj.dbid == int(settings.CHARACTER_DEFAULT_HOME.lstrip("#")):
                return "\nYou are trying to delete CHARACTER_DEFAULT_HOME. If you want to do this, use the /override switch."
            objname = obj.name
            if not obj.access(caller, 'delete'):
                return "\nYou don't have permission to delete %s." % objname
            if obj.player and not 'override' in self.switches:
                return "\nObject %s is controlled by an active player. Use /override to delete anyway." % objname
            had_exits = hasattr(obj, "exits") and obj.exits
            # NOTE(review): the generator below re-binds 'obj', so
            # 'obj not in obj.exits' tests each *content* object against its
            # own .exits attribute - looks suspicious; verify intent.
            had_objs = hasattr(obj, "contents") and any(obj for obj in obj.contents
                                                        if not (hasattr(obj, "exits") and obj not in obj.exits))
            # do the deletion
            okay = obj.delete()
            if not okay:
                string += "\nERROR: %s not deleted, probably because at_obj_delete() returned False." % objname
            else:
                string += "\n%s was destroyed." % objname
                if had_exits:
                    string += " Exits to and from %s were destroyed as well." % objname
                if had_objs:
                    string += " Objects inside %s were moved to their homes." % objname
            return string

        string = ""
        for objname in self.lhslist:
            if '-' in objname:
                # might be a range of dbrefs
                dmin, dmax = [utils.dbref(part, reqhash=False) for part in objname.split('-', 1)]
                if dmin and dmax:
                    # delete the whole inclusive range of dbrefs
                    for dbref in range(int(dmin),int(dmax+1)):
                        string += delobj("#" + str(dbref), True)
                else:
                    # '-' was not a dbref range; treat as a plain name
                    string += delobj(objname)
            else:
                string += delobj(objname, True)
        if string:
            caller.msg(string.strip())
class CmdDig(ObjManipCommand):
    """
    @dig - build and connect new rooms to the current one

    Usage:
      @dig[/switches] roomname[;alias;alias...][:typeclass]
            [= exit_to_there[;alias][:typeclass]]
               [, exit_to_here[;alias][:typeclass]]

    Switches:
       tel or teleport - move yourself to the new room

    Examples:
       @dig kitchen = north;n, south;s
       @dig house:myrooms.MyHouseTypeclass
       @dig sheer cliff;cliff;sheer = climb up, climb down

    This command is a convenient way to build rooms quickly; it creates the new room and you can optionally
    set up exits back and forth between your current room and the new one. You can add as many aliases as you
    like to the name of the room and the exits in question; an example would be 'north;no;n'.
    """
    key = "@dig"
    locks = "cmd:perm(dig) or perm(Builders)"
    help_category = "Building"

    def func(self):
        "Do the digging. Inherits variables from ObjManipCommand.parse()"
        caller = self.caller
        if not self.lhs:
            string = "Usage: @dig[/teleport] roomname[;alias;alias...][:parent] [= exit_there"
            string += "[;alias;alias..][:parent]] "
            string += "[, exit_back_here[;alias;alias..][:parent]]"
            caller.msg(string)
            return
        room = self.lhs_objs[0]
        if not room["name"]:
            caller.msg("You must supply a new room name.")
            return
        location = caller.location
        # Create the new room
        typeclass = room['option']
        if not typeclass:
            typeclass = settings.BASE_ROOM_TYPECLASS
        # create room; the lockstring is reused for the exits further down
        lockstring = "control:id(%s) or perm(Immortals); delete:id(%s) or perm(Wizards); edit:id(%s) or perm(Wizards)"
        lockstring = lockstring % (caller.dbref, caller.dbref, caller.dbref)
        new_room = create.create_object(typeclass, room["name"],
                                        aliases=room["aliases"], report_to=caller)
        new_room.locks.add(lockstring)
        alias_string = ""
        if new_room.aliases:
            alias_string = " (%s)" % ", ".join(new_room.aliases)
        room_string = "Created room %s(%s)%s of type %s." % (new_room, new_room.dbref, alias_string, typeclass)
        # create exit to room
        exit_to_string = ""
        exit_back_string = ""
        if self.rhs_objs:
            to_exit = self.rhs_objs[0]
            if not to_exit["name"]:
                exit_to_string = \
                    "\nNo exit created to new room."
            elif not location:
                exit_to_string = \
                    "\nYou cannot create an exit from a None-location."
            else:
                # Build the exit to the new room from the current one
                typeclass = to_exit["option"]
                if not typeclass:
                    typeclass = settings.BASE_EXIT_TYPECLASS
                new_to_exit = create.create_object(typeclass, to_exit["name"], location,
                                                   aliases=to_exit["aliases"],
                                                   locks=lockstring, destination=new_room, report_to=caller)
                alias_string = ""
                if new_to_exit.aliases:
                    alias_string = " (%s)" % ", ".join(new_to_exit.aliases)
                exit_to_string = "\nCreated Exit from %s to %s: %s(%s)%s."
                exit_to_string = exit_to_string % (location.name, new_room.name, new_to_exit,
                                                   new_to_exit.dbref, alias_string)
        # Create exit back from new room
        if len(self.rhs_objs) > 1:
            # Building the exit back to the current room
            back_exit = self.rhs_objs[1]
            if not back_exit["name"]:
                exit_back_string = \
                    "\nNo back exit created."
            elif not location:
                exit_back_string = \
                    "\nYou cannot create an exit back to a None-location."
            else:
                typeclass = back_exit["option"]
                if not typeclass:
                    typeclass = settings.BASE_EXIT_TYPECLASS
                new_back_exit = create.create_object(typeclass, back_exit["name"],
                                                     new_room, aliases=back_exit["aliases"],
                                                     locks=lockstring, destination=location, report_to=caller)
                alias_string = ""
                if new_back_exit.aliases:
                    alias_string = " (%s)" % ", ".join(new_back_exit.aliases)
                exit_back_string = "\nCreated Exit back from %s to %s: %s(%s)%s."
                exit_back_string = exit_back_string % (new_room.name, location.name,
                                                       new_back_exit, new_back_exit.dbref, alias_string)
        caller.msg("%s%s%s" % (room_string, exit_to_string, exit_back_string))
        if new_room and ('teleport' in self.switches or "tel" in self.switches):
            caller.move_to(new_room)
class CmdTunnel(MuxCommand):
    """
    dig in often-used directions
    Usage:
      @tunnel[/switch] <direction> [= roomname[;alias;alias;...][:typeclass]]
    Switches:
      oneway - do not create an exit back to the current location
      tel - teleport to the newly created room
    Example:
      @tunnel n
      @tunnel n = house;mike's place;green building
    This is a simple way to build using pre-defined directions:
     {wn,ne,e,se,s,sw,w,nw{n (north, northeast etc)
     {wu,d{n (up and down)
     {wi,o{n (in and out)
    The full names (north, in, southwest, etc) will always be put as
    main name for the exit, using the abbreviation as an alias (so an
    exit will always be able to be used with both "north" as well as
    "n" for example). Opposite directions will automatically be
    created back from the new room unless the /oneway switch is given.
    For more flexibility and power in creating rooms, use @dig.
    """
    key = "@tunnel"
    aliases = ["@tun"]
    locks = "cmd: perm(tunnel) or perm(Builders)"
    help_category = "Building"
    # maps each abbreviation to (full exit name, opposite abbreviation)
    directions = {"n" : ("north", "s"),
                  "ne": ("northeast", "sw"),
                  "e" : ("east", "w"),
                  "se": ("southeast", "nw"),
                  "s" : ("south", "n"),
                  "sw": ("southwest", "ne"),
                  "w" : ("west", "e"),
                  "nw": ("northwest", "se"),
                  "u" : ("up", "d"),
                  "d" : ("down", "u"),
                  "i" : ("in", "o"),
                  "o" : ("out", "i")}
    def func(self):
        "Build the new room by assembling and delegating a @dig call."
        caller = self.caller
        # we need both arguments and a non-empty direction on the left
        if not self.args or not self.lhs:
            caller.msg("Usage: @tunnel[/switch] <direction> [= roomname[;alias;alias;...][:typeclass]]")
            return
        if self.lhs not in self.directions:
            known = ",".join(sorted(self.directions.keys()))
            caller.msg("@tunnel can only understand the following directions: %s.\n(use @dig for more freedom)" % known)
            return
        # resolve the abbreviation into full exit names, both ways
        exitshort = self.lhs
        exitname, backshort = self.directions[exitshort]
        backname = self.directions[backshort][0]
        # the right-hand side, if given, names the room (aliases included)
        roomname = self.rhs if self.rhs else "Some place"
        telswitch = "/teleport" if "tel" in self.switches else ""
        # unless /oneway was given, also request a return exit from @dig
        backstring = "" if "oneway" in self.switches else ", %s;%s" % (backname, backshort)
        # hand the actual building work over to the @dig command
        digstring = "@dig%s %s = %s;%s%s" % (telswitch, roomname, exitname, exitshort, backstring)
        self.caller.execute_cmd(digstring)
class CmdLink(MuxCommand):
    """
    @link - connect objects
    Usage:
      @link[/switches] <object> = <target>
      @link[/switches] <object> =
      @link[/switches] <object>
    Switch:
      twoway - connect two exits. For this to work, BOTH <object>
               and <target> must be exit objects.
    If <object> is an exit, set its destination to <target>. Two-way operation
    instead sets the destination to the *locations* of the respective given
    arguments.
    The second form (a lone =) sets the destination to None (same as the @unlink command)
    and the third form (without =) just shows the currently set destination.
    """
    key = "@link"
    locks = "cmd:perm(link) or perm(Builders)"
    help_category = "Building"
    def func(self):
        """
        Perform the link.

        Three call forms are distinguished by self.rhs:
          - a target name  -> link (or two-way link) to that target
          - "" (a lone =)  -> clear the destination
          - None (no =)    -> just display the current destination
        """
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @link[/twoway] <object> = <target>")
            return
        object_name = self.lhs
        # get object
        obj = caller.search(object_name, global_search=True)
        if not obj:
            return
        string = ""
        if self.rhs:
            # this means a target name was given
            target = caller.search(self.rhs, global_search=True)
            if not target:
                return
            if not obj.destination:
                # warn - linking something that was not an exit before
                string += "Note: %s(%s) did not have a destination set before. Make sure you linked the right thing." % (obj.name,obj.dbref)
            if "twoway" in self.switches:
                if not (target.location and obj.location):
                    # two-way linking requires both ends to sit inside a room
                    string = "To create a two-way link, %s and %s must both have a location" % (obj, target)
                    string += " (i.e. they cannot be rooms, but should be exits)."
                    self.caller.msg(string)
                    return
                if not target.destination:
                    string += "\nNote: %s(%s) did not have a destination set before. Make sure you linked the right thing." % (target.name, target.dbref)
                # two-way: each exit leads to the *location* of the other
                obj.destination = target.location
                target.destination = obj.location
                string += "\nLink created %s (in %s) <-> %s (in %s) (two-way)." % (obj.name, obj.location, target.name, target.location)
            else:
                obj.destination = target
                string += "\nLink created %s -> %s (one way)." % (obj.name, target)
        elif self.rhs is None:
            # this means that no = was given (otherwise rhs
            # would have been an empty string). So we inspect
            # the home/destination on object
            dest = obj.destination
            if dest:
                string = "%s is an exit to %s." % (obj.name, dest.name)
            else:
                string = "%s is not an exit. Its home location is %s." % (obj.name, obj.home)
        else:
            # We gave the command @link 'obj = ' which means we want to
            # clear destination.
            if obj.destination:
                obj.destination = None
                string = "Former exit %s no longer links anywhere." % obj.name
            else:
                string = "%s had no destination to unlink." % obj.name
        # give feedback
        caller.msg(string.strip())
class CmdUnLink(CmdLink):
    """
    @unlink - unconnect objects
    Usage:
      @unlink <Object>
    Unlinks an object, for example an exit, disconnecting
    it from whatever it was connected to.
    """
    # this is just a child of CmdLink
    key = "@unlink"
    locks = "cmd:perm(unlink) or perm(Builders)"
    # fix: this was previously 'help_key = "Building"', a typo that left
    # the command without a help category; every sibling command sets
    # 'help_category'.
    help_category = "Building"
    def func(self):
        """
        All we need to do here is to set the right command
        and call func in CmdLink
        """
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @unlink <object>")
            return
        # This mimics '@link <obj> = ' which is the same as @unlink
        self.rhs = ""
        # call the @link functionality
        super(CmdUnLink, self).func()
class CmdSetHome(CmdLink):
    """
    @home - control an object's home location
    Usage:
      @home <obj> [= home_location]
    The "home" location is a "safety" location for objects; they
    will be moved there if their current location ceases to exist. All
    objects should always have a home location for this reason.
    It is also a convenient target of the "home" command.
    If no location is given, just view the object's home location.
    """
    key = "@home"
    aliases = "@sethome"
    locks = "cmd:perm(@home) or perm(Builders)"
    help_category = "Building"
    def func(self):
        """
        Implement the command.

        With no right-hand side this only displays the current home;
        with one, the home location is changed to the given target.
        """
        if not self.args:
            self.caller.msg("Usage: @home <obj> [= home_location]")
            return
        obj = self.caller.search(self.lhs, global_search=True)
        if not obj:
            return
        if not self.rhs:
            # just view
            home = obj.home
            if not home:
                string = "This object has no home location set!"
            else:
                string = "%s's current home is %s(%s)." % (obj, home, home.dbref)
        else:
            # set a home location
            new_home = self.caller.search(self.rhs, global_search=True)
            if not new_home:
                return
            old_home = obj.home
            obj.home = new_home
            if old_home:
                string = "%s's home location was changed from %s(%s) to %s(%s)." % (obj, old_home, old_home.dbref, new_home, new_home.dbref)
            else:
                # fix: message previously read "%s' home location ..." -
                # make the possessive consistent with the branch above.
                string = "%s's home location was set to %s(%s)." % (obj, new_home, new_home.dbref)
        self.caller.msg(string)
class CmdListCmdSets(MuxCommand):
    """
    list command sets on an object
    Usage:
      @cmdsets [obj]
    This displays all cmdsets assigned
    to a user. Defaults to yourself.
    """
    key = "@cmdsets"
    aliases = "@listcmsets"
    locks = "cmd:perm(listcmdsets) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Display the cmdsets of the target, defaulting to the caller."
        caller = self.caller
        if not self.arglist:
            # no argument given - inspect ourselves
            target = caller
        else:
            target = caller.search(self.arglist[0])
            if not target:
                return
        caller.msg("%s" % target.cmdset)
class CmdName(ObjManipCommand):
    """
    cname - change the name and/or aliases of an object
    Usage:
      @name obj = name;alias1;alias2
    Rename an object to something new.
    """
    key = "@name"
    aliases = ["@rename"]
    locks = "cmd:perm(rename) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Rename the object and optionally assign new aliases."
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @name <obj> = <newname>[;alias;alias;...]")
            return
        if self.lhs_objs:
            # the parser has split off the object name for us
            obj = caller.search(self.lhs_objs[0]['name'])
            if not obj:
                return
        if self.rhs_objs:
            # new name plus aliases, pre-parsed from the right-hand side
            newname = self.rhs_objs[0]['name']
            aliases = self.rhs_objs[0]['aliases']
        else:
            newname, aliases = self.rhs, None
        if not newname and not aliases:
            caller.msg("No names or aliases defined!")
            return
        # apply the new name and aliases
        if newname:
            obj.name = newname
        astring = ""
        if aliases:
            obj.aliases = aliases
            astring = " (%s)" % (", ".join(aliases))
        # fix for exits - we need their exit-command to change name too
        if obj.destination:
            obj.flush_from_cache()
        caller.msg("Object's name changed to '%s'%s." % (newname, astring))
class CmdOpen(ObjManipCommand):
    """
    @open - create new exit
    Usage:
      @open <new exit>[;alias;alias..][:typeclass] [,<return exit>[;alias;..][:typeclass]]] = <destination>
    Handles the creation of exits. If a destination is given, the exit
    will point there. The <return exit> argument sets up an exit at the
    destination leading back to the current room. Destination name
    can be given both as a #dbref and a name, if that name is globally
    unique.
    """
    key = "@open"
    locks = "cmd:perm(open) or perm(Builders)"
    help_category = "Building"
    # a custom member method to chug out exits and do checks
    def create_exit(self, exit_name, location, destination, exit_aliases=None, typeclass=None):
        """
        Helper function to avoid code duplication.
        At this point we know destination is a valid location

        Creates (or re-links) one exit named exit_name at location,
        pointing to destination. Messages the caller about the result
        and returns the exit object, or None if an existing non-exit
        object already claims the name.
        """
        caller = self.caller
        string = ""
        # check if this exit object already exists at the location.
        # we need to ignore errors (so no automatic feedback)since we
        # have to know the result of the search to decide what to do.
        exit_obj = caller.search(exit_name, location=location, ignore_errors=True)
        if len(exit_obj) > 1:
            # give error message and return
            # (re-running search without ignore_errors produces the
            # standard multimatch feedback for us)
            caller.search(exit_name, location=location)
            return
        if exit_obj:
            exit_obj = exit_obj[0]
            if not exit_obj.destination:
                # we are trying to link a non-exit
                string = "'%s' already exists and is not an exit!\nIf you want to convert it "
                string += "to an exit, you must assign an object to the 'destination' property first."
                caller.msg(string % exit_name)
                return None
            # we are re-linking an old exit.
            old_destination = exit_obj.destination
            if old_destination:
                string = "Exit %s already exists." % exit_name
                if old_destination.id != destination.id:
                    # reroute the old exit.
                    exit_obj.destination = destination
                    exit_obj.aliases = exit_aliases
                    string += " Rerouted its old destination '%s' to '%s' and changed aliases." % \
                        (old_destination.name, destination.name)
                else:
                    string += " It already points to the correct place."
        else:
            # exit does not exist before. Create a new one.
            if not typeclass:
                typeclass = settings.BASE_EXIT_TYPECLASS
            exit_obj = create.create_object(typeclass, key=exit_name,
                                            location=location,
                                            aliases=exit_aliases, report_to=caller)
            if exit_obj:
                # storing a destination is what makes it an exit!
                exit_obj.destination = destination
                string = "Created new Exit '%s' from %s to %s (aliases: %s)." % (exit_name,location.name,
                                                                                destination.name,
                                                                                ", ".join([str(e) for e in exit_aliases]))
            else:
                string = "Error: Exit '%s' not created." % (exit_name)
        # emit results
        caller.msg(string)
        return exit_obj
    def func(self):
        """
        This is where the processing starts.
        Uses the ObjManipCommand.parser() for pre-processing
        as well as the self.create_exit() method.
        """
        caller = self.caller
        if not self.args or not self.rhs:
            string = "Usage: @open <new exit>[;alias...][:typeclass][,<return exit>[;alias..][:typeclass]]] "
            string += "= <destination>"
            caller.msg(string)
            return
        # We must have a location to open an exit
        location = caller.location
        if not location:
            caller.msg("You cannot create an exit from a None-location.")
            return
        # obtain needed info from cmdline
        exit_name = self.lhs_objs[0]['name']
        exit_aliases = self.lhs_objs[0]['aliases']
        exit_typeclass = self.lhs_objs[0]['option']
        dest_name = self.rhs
        # first, check so the destination exists.
        destination = caller.search(dest_name, global_search=True)
        if not destination:
            return
        # Create exit
        ok = self.create_exit(exit_name, location, destination, exit_aliases, exit_typeclass)
        if not ok:
            # an error; the exit was not created, so we quit.
            return
        # Create back exit, if any
        # (second comma-separated lhs object is the optional return exit)
        if len(self.lhs_objs) > 1:
            back_exit_name = self.lhs_objs[1]['name']
            back_exit_aliases = self.lhs_objs[1]['aliases']
            back_exit_typeclass = self.lhs_objs[1]['option']
            ok = self.create_exit(back_exit_name, destination, location, back_exit_aliases, back_exit_typeclass)
class CmdSetAttribute(ObjManipCommand):
    """
    @set - set attributes
    Usage:
      @set <obj>/<attr> = <value>
      @set <obj>/<attr> =
      @set <obj>/<attr>
    Sets attributes on objects. The second form clears
    a previously set attribute while the last form
    inspects the current value of the attribute
    (if any).
    The most common data to save with this command are strings and
    numbers. You can however also set Python primities such as lists,
    dictionaries and tuples on objects (this might be important for
    the functionality of certain custom objects). This is indicated
    by you starting your value with one of {c'{n, {c"{n, {c({n, {c[{n or {c{ {n.
    Note that you should leave a space after starting a dictionary ('{ ')
    so as to not confuse the dictionary start with a colour code like \{g.
    Remember that if you use Python primitives like this, you must
    write proper Python syntax too - notably you must include quotes
    around your strings or you will get an error.
    """
    key = "@set"
    locks = "cmd:perm(set) or perm(Builders)"
    help_category = "Building"
    def convert_from_string(self, strobj):
        """
        Converts a single object in *string form* to its equivalent python
        type.
        Python earlier than 2.6:
        Handles floats, ints, and limited nested lists and dicts
        (can't handle lists in a dict, for example, this is mainly due to
        the complexity of parsing this rather than any technical difficulty -
        if there is a need for @set-ing such complex structures on the
        command line we might consider adding it).
        Python 2.6 and later:
        Supports all Python structures through literal_eval as long as they
        are valid Python syntax. If they are not (such as [test, test2], ie
        withtout the quotes around the strings), the entire structure will
        be converted to a string and a warning will be given.
        We need to convert like this since all data being sent over the
        telnet connection by the Player is text - but we will want to
        store it as the "real" python type so we can do convenient
        comparisons later (e.g. obj.db.value = 2, if value is stored as a
        string this will always fail).
        """
        def rec_convert(obj):
            """
            Helper function of recursive conversion calls. This is only
            used for Python <=2.5. After that literal_eval is available.
            """
            # simple types
            try: return int(obj)
            except ValueError: pass
            try: return float(obj)
            except ValueError: pass
            # iterables
            if obj.startswith('[') and obj.endswith(']'):
                "A list. Traverse recursively."
                return [rec_convert(val) for val in obj[1:-1].split(',')]
            if obj.startswith('(') and obj.endswith(')'):
                "A tuple. Traverse recursively."
                return tuple([rec_convert(val) for val in obj[1:-1].split(',')])
            if obj.startswith('{') and obj.endswith('}') and ':' in obj:
                "A dict. Traverse recursively."
                # NOTE: splits naively on ',' and ':' - nested containers
                # are not supported here (documented limitation above)
                return dict([(rec_convert(pair.split(":",1)[0]), rec_convert(pair.split(":",1)[1]))
                             for pair in obj[1:-1].split(',') if ":" in pair])
            # if nothing matches, return as-is
            return obj
        if _LITERAL_EVAL:
            # Use literal_eval to parse python structure exactly.
            try:
                return _LITERAL_EVAL(strobj)
            except (SyntaxError, ValueError):
                # treat as string
                string = "{RNote: Value was converted to string. If you don't want this, "
                string += "use proper Python syntax, like enclosing strings in quotes.{n"
                self.caller.msg(string)
                return utils.to_str(strobj)
        else:
            # fall back to old recursive solution (does not support nested lists/dicts)
            return rec_convert(strobj.strip())
    def func(self):
        "Implement the set attribute - a limited form of @py."
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @set obj/attr = value. Use empty value to clear.")
            return
        # get values prepared by the parser
        value = self.rhs
        objname = self.lhs_objattr[0]['name']
        attrs = self.lhs_objattr[0]['attrs']
        obj = caller.search(objname)
        if not obj:
            return
        string = ""
        if not value:
            if self.rhs == None:
                # no = means we inspect the attribute(s)
                if not attrs:
                    # no specific attribute given - show all of them
                    attrs = [attr.key for attr in obj.get_all_attributes()]
                for attr in attrs:
                    if obj.has_attribute(attr):
                        string += "\nAttribute %s/%s = %s" % (obj.name, attr, obj.get_attribute(attr))
                    else:
                        string += "\n%s has no attribute '%s'." % (obj.name, attr)
                # we view it without parsing markup.
                self.caller.msg(string.strip(), data={"raw":True})
                return
            else:
                # deleting the attribute(s) ('@set obj/attr =' form)
                for attr in attrs:
                    if obj.has_attribute(attr):
                        val = obj.get_attribute(attr)
                        obj.del_attribute(attr)
                        string += "\nDeleted attribute '%s' (= %s) from %s." % (attr, val, obj.name)
                    else:
                        string += "\n%s has no attribute '%s'." % (obj.name, attr)
        else:
            # setting attribute(s). Make sure to convert to real Python type before saving.
            for attr in attrs:
                try:
                    obj.set_attribute(attr, self.convert_from_string(value))
                    string += "\nCreated attribute %s/%s = %s" % (obj.name, attr, value)
                except SyntaxError:
                    # this means literal_eval tried to parse a faulty string
                    string = "{RCritical Python syntax error in your value. Only primitive Python structures"
                    string += "\nare allowed. You also need to use correct Python syntax. Remember especially"
                    string += "\nto put quotes around all strings inside lists and dicts.{n"
        # send feedback
        caller.msg(string.strip('\n'))
class CmdTypeclass(MuxCommand):
    """
    @typeclass - set object typeclass
    Usage:
      @typeclass[/switch] <object> [= <typeclass.path>]
      @type ''
      @parent ''
    Switch:
      reset - clean out *all* the attributes on the object -
              basically making this a new clean object.
      force - change to the typeclass also if the object
              already has a typeclass of the same name.
    Example:
      @type button = examples.red_button.RedButton
    View or set an object's typeclass. If setting, the creation hooks
    of the new typeclass will be run on the object. If you have
    clashing properties on the old class, use /reset. By default you
    are protected from changing to a typeclass of the same name as the
    one you already have, use /force to override this protection.
    The given typeclass must be identified by its location using
    python dot-notation pointing to the correct module and class. If
    no typeclass is given (or a wrong typeclass is given). Errors in
    the path or new typeclass will lead to the old typeclass being
    kept. The location of the typeclass module is searched from the
    default typeclass directory, as defined in the server settings.
    """
    key = "@typeclass"
    aliases = "@type, @parent"
    locks = "cmd:perm(typeclass) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Implements command"
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @type <object> [=<typeclass]")
            return
        # get object to swap on
        obj = caller.search(self.lhs)
        if not obj:
            return
        if not self.rhs:
            # we did not supply a new typeclass, view the
            # current one instead.
            if hasattr(obj, "typeclass"):
                string = "%s's current typeclass is '%s' (%s)." % (obj.name, obj.typeclass.typename, obj.typeclass.path)
            else:
                string = "%s is not a typed object." % obj.name
            caller.msg(string)
            return
        # we have an =, a typeclass was supplied.
        typeclass = self.rhs
        if not obj.access(caller, 'edit'):
            caller.msg("You are not allowed to do that.")
            return
        if not hasattr(obj, 'swap_typeclass') or not hasattr(obj, 'typeclass'):
            caller.msg("This object cannot have a type at all!")
            return
        is_same = obj.is_typeclass(typeclass)
        if is_same and not 'force' in self.switches:
            string = "%s already has the typeclass '%s'. Use /force to override." % (obj.name, typeclass)
        else:
            reset = "reset" in self.switches
            # remember the old path so we can report it after the swap
            old_typeclass_path = obj.typeclass.path
            ok = obj.swap_typeclass(typeclass, clean_attributes=reset)
            if ok:
                if is_same:
                    # /force re-applied the same typeclass
                    string = "%s updated its existing typeclass (%s).\n" % (obj.name, obj.typeclass.path)
                else:
                    string = "%s's changed typeclass from %s to %s.\n" % (obj.name,
                                                                         old_typeclass_path,
                                                                         obj.typeclass.path)
                string += "Creation hooks were run."
                if reset:
                    string += " All old attributes where deleted before the swap."
                else:
                    string += " Note that the typeclassed object could have ended up with a mixture of old"
                    string += "\nand new attributes. Use /reset to remove old attributes if you don't want this."
            else:
                # swap failed - report the error stored by the typeclass system
                string = obj.typeclass_last_errmsg
                string += "\nCould not swap '%s' (%s) to typeclass '%s'." % (obj.name,
                                                                            old_typeclass_path,
                                                                            typeclass)
        caller.msg(string)
class CmdWipe(ObjManipCommand):
    """
    @wipe - clears attributes
    Usage:
      @wipe <object>[/attribute[/attribute...]]
    Example:
      @wipe box
      @wipe box/colour
    Wipes all of an object's attributes, or optionally only those
    matching the given attribute-wildcard search string.
    """
    key = "@wipe"
    locks = "cmd:perm(wipe) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Delete all, or the named, attributes from the target object."
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @wipe <object>[/attribute/attribute...]")
            return
        # the custom ObjManipCommand parser splits name and attributes
        objname = self.lhs_objattr[0]['name']
        attrs = self.lhs_objattr[0]['attrs']
        obj = caller.search(objname)
        if not obj:
            return
        if not obj.access(caller, 'edit'):
            caller.msg("You are not allowed to do that.")
            return
        if attrs:
            # only the explicitly named attributes
            for attrname in attrs:
                obj.attr(attrname, delete=True)
            string = "Wiped attributes %s on %s." % (",".join(attrs), obj.name)
        else:
            # no attributes given - clear everything
            for attr in obj.get_all_attributes():
                attr.delete()
            string = "Wiped all attributes on %s." % obj.name
        caller.msg(string)
class CmdLock(ObjManipCommand):
    """
    lock - assign a lock definition to an object
    Usage:
      @lock <object>[ = <lockstring>]
      or
      @lock[/switch] object/<access_type>
    Switch:
      del - delete given access type
      view - view lock associated with given access type (default)
    If no lockstring is given, shows all locks on
    object.
    Lockstring is on the form
    'access_type:[NOT] func1(args)[ AND|OR][ NOT] func2(args) ...]
    Where func1, func2 ... valid lockfuncs with or without arguments.
    Separator expressions need not be capitalized.
    For example:
       'get: id(25) or perm(Wizards)'
    The 'get' access_type is checked by the get command and will
    an object locked with this string will only be possible to
    pick up by Wizards or by object with id 25.
    You can add several access_types after oneanother by separating
    them by ';', i.e:
       'get:id(25);delete:perm(Builders)'
    """
    key = "@lock"
    aliases = ["@locks", "lock", "locks"]
    locks = "cmd: perm(@locks) or perm(Builders)"
    help_category = "Building"
    def func(self):
        """
        Sets up the command.

        Three forms: view/delete a single access type (obj/access_type),
        assign a new lockstring (obj = lockstring), or list all locks
        on the object (obj alone).
        """
        caller = self.caller
        if not self.args:
            string = "@lock <object>[ = <lockstring>] or @lock[/switch] object/<access_type>"
            caller.msg(string)
            return
        if '/' in self.lhs:
            # call on the form @lock obj/access_type
            objname, access_type = [p.strip() for p in self.lhs.split('/', 1)]
            obj = caller.search(objname)
            if not obj:
                return
            lockdef = obj.locks.get(access_type)
            if lockdef:
                if 'del' in self.switches:
                    # deleting a lock requires 'control' access
                    if not obj.access(caller, 'control'):
                        caller.msg("You are not allowed to do that.")
                        return
                    obj.locks.delete(access_type)
                    string = "deleted lock %s" % lockdef
                else:
                    # fix: default 'view' mode previously left 'string'
                    # unassigned, raising UnboundLocalError below.
                    string = lockdef
            else:
                string = "%s has no lock of access type '%s'." % (obj, access_type)
            caller.msg(string)
            return
        if self.rhs:
            # we have a = separator, so we are assigning a new lock
            objname, lockdef = self.lhs, self.rhs
            obj = caller.search(objname)
            if not obj:
                return
            if not obj.access(caller, 'control'):
                caller.msg("You are not allowed to do that.")
                return
            ok = obj.locks.add(lockdef, caller)
            if ok:
                caller.msg("Added lock '%s' to %s." % (lockdef, obj))
            return
        # if we get here, we are just viewing all locks
        obj = caller.search(self.lhs)
        if not obj:
            return
        caller.msg(obj.locks)
class CmdExamine(ObjManipCommand):
    """
    examine - detailed info on objects
    Usage:
      examine [<object>[/attrname]]
      examine [*<player>[/attrname]]
    Switch:
      player - examine a Player (same as adding *)
    The examine command shows detailed game info about an
    object and optionally a specific attribute on it.
    If object is not specified, the current location is examined.
    Append a * before the search string to examine a player.
    """
    key = "@examine"
    aliases = ["@ex","ex", "exam", "examine"]
    locks = "cmd:perm(examine) or perm(Builders)"
    help_category = "Building"
    arg_regex = r"(/\w+?(\s|$))|\s|$"
    # set per-target in func(); True when examining a Player entity
    player_mode = False
    def list_attribute(self, crop, attr, value):
        """
        Formats a single attribute line.
        """
        if crop and isinstance(value, basestring):
            # crop long string values for the overview display
            value = utils.crop(value)
            value = repr(value)
        string = "\n %s = %s" % (attr, value)
        # raw() stops markup inside the value from being parsed
        string = raw(string)
        return string
    def format_attributes(self, obj, attrname=None, crop=True):
        """
        Helper function that returns info about attributes and/or
        non-persistent data stored on object
        """
        if attrname:
            # only the single named attribute is requested
            db_attr = [(attrname, obj.attr(attrname))]
            try:
                ndb_attr = [(attrname, object.__getattribute__(obj.ndb, attrname))]
            except Exception:
                ndb_attr = None
        else:
            # fetch all attributes; Players store theirs in a separate table
            if self.player_mode:
                db_attr = [(attr.key, attr.value) for attr in PlayerAttribute.objects.filter(db_obj=obj)]
            else:
                db_attr = [(attr.key, attr.value) for attr in ObjAttribute.objects.filter(db_obj=obj)]
            try:
                # non-persistent data lives directly on obj.ndb; skip privates
                ndb_attr = [(aname, avalue) for aname, avalue in obj.ndb.__dict__.items() if not aname.startswith("_")]
            except Exception:
                ndb_attr = None
        string = ""
        if db_attr and db_attr[0]:
            string += "\n{wPersistent attributes{n:"
            for attr, value in db_attr:
                string += self.list_attribute(crop, attr, value)
        if ndb_attr and ndb_attr[0]:
            string += "\n{wNon-Persistent attributes{n:"
            for attr, value in ndb_attr:
                string += self.list_attribute(crop, attr, value)
        return string
    def format_output(self, obj, avail_cmdset):
        """
        Helper function that creates a nice report about an object.
        returns a string.
        """
        string = "\n{wName/key{n: {c%s{n (%s)" % (obj.name, obj.dbref)
        if hasattr(obj, "aliases") and obj.aliases:
            string += "\n{wAliases{n: %s" % (", ".join(utils.make_iter(obj.aliases)))
        if hasattr(obj, "has_player") and obj.has_player:
            # this object is puppeted - show its controlling player
            string += "\n{wPlayer{n: {c%s{n" % obj.player.name
            perms = obj.player.permissions
            if obj.player.is_superuser:
                perms = ["<Superuser>"]
            elif not perms:
                perms = ["<None>"]
            string += "\n{wPlayer Perms{n: %s" % (", ".join(perms))
        string += "\n{wTypeclass{n: %s (%s)" % (obj.typeclass.typename, obj.typeclass_path)
        if hasattr(obj, "location"):
            string += "\n{wLocation{n: %s" % obj.location
            if obj.location:
                string += " (#%s)" % obj.location.id
        if hasattr(obj, "destination"):
            string += "\n{wDestination{n: %s" % obj.destination
            if obj.destination:
                string += " (#%s)" % obj.destination.id
        perms = obj.permissions
        if perms:
            perms_string = (", ".join(perms))
        else:
            perms_string = "Default"
        string += "\n{wPermissions{n: %s" % perms_string
        locks = str(obj.locks)
        if locks:
            locks_string = utils.fill("; ".join([lock for lock in locks.split(';')]), indent=6)
        else:
            locks_string = " Default"
        string += "\n{wLocks{n:%s" % locks_string
        if not (len(obj.cmdset.all()) == 1 and obj.cmdset.current.key == "Empty"):
            # list the current cmdsets
            all_cmdsets = obj.cmdset.all() + (hasattr(obj, "player") and obj.player and obj.player.cmdset.all() or [])
            all_cmdsets.sort(key=lambda x:x.priority, reverse=True)
            string += "\n{wCurrent Cmdset(s){n:\n %s" % ("\n ".join("%s (prio %s)" % (cmdset.path, cmdset.priority) for cmdset in all_cmdsets))
            # list the commands available to this object
            avail_cmdset = sorted([cmd.key for cmd in avail_cmdset if cmd.access(obj, "cmd")])
            cmdsetstr = utils.fill(", ".join(avail_cmdset), indent=2)
            string += "\n{wCommands available to %s (all cmdsets + exits and external cmds){n:\n %s" % (obj.key, cmdsetstr)
        if hasattr(obj, "scripts") and hasattr(obj.scripts, "all") and obj.scripts.all():
            string += "\n{wScripts{n:\n %s" % obj.scripts
        # add the attributes
        string += self.format_attributes(obj)
        # add the contents
        # categorize contents into exits, player characters and other things
        exits = []
        pobjs = []
        things = []
        if hasattr(obj, "contents"):
            for content in obj.contents:
                if content.destination:
                    exits.append(content)
                elif content.player:
                    pobjs.append(content)
                else:
                    things.append(content)
            if exits:
                string += "\n{wExits{n: %s" % ", ".join([exit.name for exit in exits])
            if pobjs:
                string += "\n{wCharacters{n: %s" % ", ".join(["{c%s{n" % pobj.name for pobj in pobjs])
            if things:
                string += "\n{wContents{n: %s" % ", ".join([cont.name for cont in obj.contents
                                                            if cont not in exits and cont not in pobjs])
        separater = "="*78
        #output info
        return '%s\n%s\n%s' % ( separater, string.strip(), separater )
    def func(self):
        "Process command"
        caller = self.caller
        def get_cmdset_callback(cmdset):
            """
            We make use of the cmdhandeler.get_and_merge_cmdsets below. This
            is an asynchronous function, returning a Twisted deferred.
            So in order to properly use this we need use this callback;
            it is called with the result of get_and_merge_cmdsets, whenever
            that function finishes. Taking the resulting cmdset, we continue
            to format and output the result.
            """
            string = self.format_output(obj, cmdset)
            caller.msg(string.strip())
        if not self.args:
            # If no arguments are provided, examine the invoker's location.
            obj = caller.location
            if not obj.access(caller, 'examine'):
                #If we don't have special info access, just look at the object instead.
                caller.execute_cmd('look %s' % obj.name)
                return
            # using callback for printing result whenever function returns.
            get_and_merge_cmdsets(obj).addCallback(get_cmdset_callback)
            return
        # we have given a specific target object
        for objdef in self.lhs_objattr:
            obj_name = objdef['name']
            obj_attrs = objdef['attrs']
            # player mode if the /player switch is used or name starts with '*'
            self.player_mode = "player" in self.switches or obj_name.startswith('*')
            obj = caller.search(obj_name, player=self.player_mode, global_dbref=True)
            if not obj:
                continue
            if not obj.access(caller, 'examine'):
                #If we don't have special info access, just look at the object instead.
                caller.execute_cmd('look %s' % obj_name)
                continue
            if obj_attrs:
                for attrname in obj_attrs:
                    # we are only interested in specific attributes
                    caller.msg(self.format_attributes(obj, attrname, crop=False))
            else:
                # using callback to print results whenever function returns.
                get_and_merge_cmdsets(obj).addCallback(get_cmdset_callback)
class CmdFind(MuxCommand):
    """
    find objects
    Usage:
      @find[/switches] <name or dbref or *player> [= dbrefmin[-dbrefmax]]
    Switches:
      room - only look for rooms (location=None)
      exit - only look for exits (destination!=None)
      char - only look for characters (BASE_CHARACTER_TYPECLASS)
    Searches the database for an object of a particular name or dbref.
    Use *playername to search for a player. The switches allows for
    limiting object matches to certain game entities. Dbrefmin and dbrefmax
    limits matches to within the given dbrefs, or above/below if only one is given.
    """
    key = "@find"
    aliases = "find, @search, search, @locate, locate"
    locks = "cmd:perm(find) or perm(Builders)"
    help_category = "Building"
    def func(self):
        """
        Search functionality.

        Either a unique player/dbref lookup, or a key-prefix database
        search restricted by the optional dbref window and switches.
        """
        caller = self.caller
        switches = self.switches
        if not self.args:
            caller.msg("Usage: @find <string> [= low [-high]]")
            return
        searchstring = self.lhs
        # default dbref window: 1 .. highest id currently in the database
        # NOTE(review): assumes the object table is non-empty - confirm.
        low, high = 1, ObjectDB.objects.all().order_by("-id")[0].id
        if self.rhs:
            if "-" in self.rhs:
                # also support low-high syntax
                limlist = [part.strip() for part in self.rhs.split("-", 1)]
            else:
                # otherwise split by space
                limlist = self.rhs.split(None, 1)
            if limlist and limlist[0].isdigit():
                low = max(low, int(limlist[0]))
            if len(limlist) > 1 and limlist[1].isdigit():
                high = min(high, int(limlist[1]))
        # keep the window well-ordered (clamps low down to high if reversed)
        low = min(low, high)
        high = max(low, high)
        if searchstring.startswith("*") or utils.dbref(searchstring):
            # A player/dbref search.
            # run a normal player- or dbref search. This should be unique.
            string = "{wMatch{n(#%i-#%i):" % (low, high)
            result = caller.search(searchstring, global_search=True)
            if not result:
                return
            if not low <= int(result.id) <= high:
                string += "\n {RNo match found for '%s' within the given dbref limits.{n" % searchstring
            else:
                string += "\n{g %s(%s) - %s{n" % (result.key, result.dbref, result.typeclass.path)
        else:
            # Not a player/dbref search but a wider search; build a queryset.
            results = ObjectDB.objects.filter(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
            if "room" in switches:
                results = results.filter(db_location__isnull=True)
            if "exit" in switches:
                results = results.filter(db_destination__isnull=False)
            if "char" in switches:
                results = results.filter(db_typeclass_path=CHAR_TYPECLASS)
            nresults = results.count()
            if not nresults:
                # no matches on the keys. Try aliases instead.
                # (fix: this previously read 'results = results = ...',
                # a confusing duplicated assignment)
                results = ObjectDB.alias_set.related.model.objects.filter(db_key=searchstring)
                if "room" in switches:
                    results = results.filter(db_obj__db_location__isnull=True)
                if "exit" in switches:
                    results = results.filter(db_obj__db_destination__isnull=False)
                if "char" in switches:
                    results = results.filter(db_obj__db_typeclass_path=CHAR_TYPECLASS)
                # we have to parse alias -> real object here
                results = [result.db_obj for result in results]
                nresults = len(results)
            restrictions = ""
            if self.switches:
                restrictions = ", %s" % (",".join(self.switches))
            if nresults:
                # convert result to typeclasses.
                results = [result.typeclass for result in results]
                if nresults > 1:
                    string = "{w%i Matches{n(#%i-#%i%s):" % (nresults, low, high, restrictions)
                    for res in results:
                        string += "\n {g%s(%s) - %s{n" % (res.key, res.dbref, res.path)
                else:
                    string = "{wOne Match{n(#%i-#%i%s):" % (low, high, restrictions)
                    string += "\n {g%s(%s) - %s{n" % (results[0].key, results[0].dbref, results[0].path)
            else:
                string = "{wMatch{n(#%i-#%i%s):" % (low, high, restrictions)
                string += "\n {RNo matches found for '%s'{n" % searchstring
        # send result
        caller.msg(string.strip())
class CmdTeleport(MuxCommand):
    """
    teleport object to another location
    Usage:
    @tel/switch [<object> =] <target location>
    Examples:
    @tel Limbo
    @tel/quiet box Limbo
    @tel/tonone box
    Switches:
    quiet - don't echo leave/arrive messages to the source/target
    locations for the move.
    intoexit - if target is an exit, teleport INTO
    the exit object instead of to its destination
    tonone - if set, teleport the object to a None-location. If this
    switch is set, <target location> is ignored.
    Note that the only way to retrieve
    an object from a None location is by direct #dbref
    reference.
    Teleports an object somewhere. If no object is given, you yourself
    is teleported to the target location. """
    key = "@tel"
    aliases = "@teleport"
    locks = "cmd:perm(teleport) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Performs the teleport"
        caller = self.caller
        args = self.args
        lhs, rhs = self.lhs, self.rhs
        switches = self.switches
        # Flags derived from the command switches.
        quiet_move = "quiet" in switches
        strip_location = "tonone" in switches
        if strip_location:
            # Send an object (or ourselves) to a None-location.
            if args:
                victim = caller.search(lhs, global_search=True)
                if not victim:
                    caller.msg("Did not find object to teleport.")
                    return
                caller.msg("Teleported %s -> None-location." % victim)
                if victim.location and not quiet_move:
                    victim.location.msg_contents("%s teleported %s into nothingness."
                                                 % (caller, victim),
                                                 exclude=caller)
            else:
                victim = caller
                caller.msg("Teleported to None-location.")
                if caller.location and not quiet_move:
                    caller.location.msg_contents("%s teleported into nothingness." % caller, exclude=caller)
            victim.location = None
            return
        # Normal teleport to a real destination.
        if not args and not strip_location:
            caller.msg("Usage: teleport[/switches] [<obj> =] <target_loc>|home")
            return
        if rhs:
            # "<obj> = <destination>" form.
            victim = caller.search(lhs, global_search=True)
            destination = caller.search(rhs, global_search=True)
        else:
            # Only a destination given: teleport the caller.
            victim = caller
            destination = caller.search(lhs, global_search=True)
        if not victim:
            caller.msg("Did not find object to teleport.")
            return
        if not destination:
            caller.msg("Destination not found.")
            return
        if victim == destination:
            caller.msg("You can't teleport an object inside of itself!")
            return
        if victim.location and victim.location == destination:
            caller.msg("%s is already at %s." % (victim, destination))
            return
        # /intoexit moves into the exit object itself rather than
        # following its destination.
        use_destination = "intoexit" not in self.switches
        # try the teleport
        if victim.move_to(destination, quiet=quiet_move, emit_to_obj=caller,
                          use_destination=use_destination):
            if victim == caller:
                caller.msg("Teleported to %s." % destination)
            else:
                caller.msg("Teleported %s -> %s." % (victim, destination))
class CmdScript(MuxCommand):
    """
    attach scripts
    Usage:
    @script[/switch] <obj> [= <script.path or scriptkey>]
    Switches:
    start - start all non-running scripts on object, or a given script only
    stop - stop all scripts on objects, or a given script only
    If no script path/key is given, lists all scripts active on the given
    object.
    Script path can be given from the base location for scripts as given in
    settings. If adding a new script, it will be started automatically (no /start
    switch is needed). Using the /start or /stop switches on an object without
    specifying a script key/path will start/stop ALL scripts on the object.
    """
    key = "@script"
    aliases = "@addscript"
    locks = "cmd:perm(script) or perm(Builders)"
    help_category = "Building"
    def func(self):
        "Do stuff"
        caller = self.caller
        if not self.args:
            caller.msg("Usage: @script[/switch] <obj> [= <script.path or script key>]")
            return
        obj = caller.search(self.lhs)
        if not obj:
            return
        string = ""
        if not self.rhs:
            # No right-hand side: operate on every script on the object.
            scripts = obj.scripts.all()
            if not scripts:
                string += "No scripts defined on %s." % obj.key
            elif not self.switches:
                # Plain listing of all attached scripts.
                from src.commands.default.system import format_script_list
                string += format_script_list(scripts)
            elif "start" in self.switches:
                # (Re)start every script; count successes.
                num = sum([obj.scripts.start(script.key) for script in scripts])
                string += "%s scripts started on %s." % (num, obj.key)
            elif "stop" in self.switches:
                # Stop all scripts, reporting each one.
                for script in scripts:
                    string += "Stopping script %s on %s." % (script.key, obj.key)
                    script.stop()
                string = string.strip()
            obj.scripts.validate()
        else:
            # rhs given: add, start or stop one specific script.
            if not self.switches:
                # Adding a new script; it starts automatically.
                ok = obj.scripts.add(self.rhs, autostart=True)
                if ok:
                    string = "Script {w%s{n successfully added and started on %s." % (self.rhs, obj.key)
                else:
                    string += "\nScript %s could not be added and/or started on %s." % (self.rhs, obj.key)
            else:
                # Try the literal key first, then each settings-defined
                # typeclass prefix.
                paths = [self.rhs] + ["%s.%s" % (prefix, self.rhs)
                                      for prefix in settings.SCRIPT_TYPECLASS_PATHS]
                if "stop" in self.switches:
                    for path in paths:
                        if obj.scripts.stop(path):
                            string = "Script stopped and removed from object."
                            break
                        string += "\nScript %s could not be stopped. Does it exist?" % path
                if "start" in self.switches:
                    for path in paths:
                        if obj.scripts.start(path):
                            string = "Script started successfully."
                            break
                        string += "\nScript %s could not be (re)started." % path
        caller.msg(string.strip())
| {
"content_hash": "7b3f8b34947f2d60ce016238f53ee2b9",
"timestamp": "",
"source": "github",
"line_count": 2031,
"max_line_length": 153,
"avg_line_length": 38.32299359921221,
"alnum_prop": 0.5459824755248349,
"repo_name": "YourCyborg/Sun-RPI",
"id": "dcbba718d29467ab31a52fa622c6eb04400bf1cd",
"size": "77834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/commands/default/building.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "10522"
},
{
"name": "Python",
"bytes": "2151966"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
# Upper bound (seconds) passed to Thread.join() throughout these tests so
# a wedged worker thread cannot hang the suite indefinitely.
join_timeout = 10
def MockDBAPI():  # noqa
    """Build a Mock standing in for a DBAPI module.

    The returned mock exposes ``connect()`` (producing mock connections
    whose ``close()`` flips ``closed`` to True) and ``shutdown(value)``
    which, when value is truthy, makes subsequent ``connect()`` calls
    raise ``Exception("connect failed")``.
    """
    def _make_cursor():
        return Mock()

    def _make_connection(*arg, **kw):
        connection = Mock(
            cursor=Mock(side_effect=_make_cursor),
            closed=False,
        )

        def _close():
            # mock seems like it might have an issue logging
            # call_count correctly under threading, not sure.
            # adding a side_effect for close seems to help.
            connection.closed = True

        connection.close = Mock(side_effect=_close)
        return connection

    def _shutdown(value):
        # Swap the connect side-effect between failure and success mode.
        if value:
            db.connect = Mock(side_effect=Exception("connect failed"))
        else:
            db.connect = Mock(side_effect=_make_connection)
        db.is_shutdown = value

    db = Mock(
        connect=Mock(side_effect=_make_connection),
        shutdown=_shutdown,
        is_shutdown=False,
    )
    return db
class PoolTestBase(fixtures.TestBase):
    """Shared setup/teardown plus QueuePool fixtures for the pool tests."""

    def setup(self):
        # Start each test from a clean pool-manager state.
        pool.clear_managers()
        self._teardown_conns = []

    def teardown(self):
        # Close any registered connections that are still alive.
        for conn_ref in self._teardown_conns:
            live = conn_ref()
            if live:
                live.close()

    @classmethod
    def teardown_class(cls):
        pool.clear_managers()

    def _with_teardown(self, connection):
        """Register *connection* (weakly) for closing at teardown."""
        self._teardown_conns.append(weakref.ref(connection))
        return connection

    def _queuepool_fixture(self, **kw):
        # Convenience wrapper when the DBAPI mock itself is not needed.
        return self._queuepool_dbapi_fixture(**kw)[1]

    def _queuepool_dbapi_fixture(self, **kw):
        dbapi = MockDBAPI()
        queue_pool = pool.QueuePool(
            creator=lambda: dbapi.connect("foo.db"), **kw
        )
        return dbapi, queue_pool
class PoolTest(PoolTestBase):
    # Generic pool behavior: cursor iteration, recreate(), the
    # per-checkout .info / per-record .record_info dictionaries, and the
    # _ConnectionRecord lifecycle.
    @testing.fails_on(
        "+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
    )
    @testing.fails_on("+pg8000", "returns [1], not (1,)")
    def test_cursor_iterable(self):
        # A raw DBAPI cursor is iterable, yielding row tuples.
        conn = testing.db.raw_connection()
        cursor = conn.cursor()
        cursor.execute(str(select([1], bind=testing.db)))
        expected = [(1,)]
        for row in cursor:
            eq_(row, expected.pop(0))
    def test_no_connect_on_recreate(self):
        # recreate() must never open a connection by itself: first with a
        # creator that raises on use, then with a mock creator that is
        # made to fail after dispose().
        def creator():
            raise Exception("no creates allowed")
        for cls in (
            pool.SingletonThreadPool,
            pool.StaticPool,
            pool.QueuePool,
            pool.NullPool,
            pool.AssertionPool,
        ):
            p = cls(creator=creator)
            p.dispose()
            p2 = p.recreate()
            assert p2.__class__ is cls
            mock_dbapi = MockDBAPI()
            p = cls(creator=mock_dbapi.connect)
            conn = p.connect()
            conn.close()
            mock_dbapi.connect.side_effect = Exception("error!")
            p.dispose()
            p.recreate()
    def test_info(self):
        # .info persists across checkin, is cleared by invalidate(), and
        # stays with the connection after detach().
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.info)
        self.assert_(c.info is c._connection_record.info)
        c.info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" not in c.info)
        c.info["foo2"] = "bar2"
        c.detach()
        self.assert_("foo2" in c.info)
        c2 = p.connect()
        is_not_(c.connection, c2.connection)
        assert not c2.info
        assert "foo2" in c.info
    def test_rec_info(self):
        # .record_info belongs to the record: it survives invalidate()
        # but is dropped on detach() along with the record itself.
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.record_info)
        self.assert_(c.record_info is c._connection_record.record_info)
        c.record_info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.record_info["foo2"] = "bar2"
        c.detach()
        is_(c.record_info, None)
        is_(c._connection_record, None)
        c2 = p.connect()
        assert c2.record_info
        assert "foo2" in c2.record_info
    def test_rec_unconnected(self):
        # test production of a _ConnectionRecord with an
        # initially unconnected state.
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1, connect=False)
        assert not r1.connection
        c1 = r1.get_connection()
        is_(c1, r1.connection)
    def test_rec_close_reopen(self):
        # test that _ConnectionRecord.close() allows
        # the record to be reusable
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1)
        c1 = r1.connection
        c2 = r1.get_connection()
        is_(c1, c2)
        r1.close()
        assert not r1.connection
        eq_(c1.mock_calls, [call.close()])
        c2 = r1.get_connection()
        is_not_(c1, c2)
        is_(c2, r1.connection)
        eq_(c2.mock_calls, [])
class PoolDialectTest(PoolTestBase):
    """Verify each pool class routes rollback/close through its _dialect."""

    def _dialect(self):
        # Each hook appends a short code to ``canary`` before delegating
        # to the raw DBAPI connection.
        canary = []

        class PoolDialect(object):
            def do_rollback(self, dbapi_connection):
                canary.append("R")
                dbapi_connection.rollback()

            def do_commit(self, dbapi_connection):
                canary.append("C")
                dbapi_connection.commit()

            def do_close(self, dbapi_connection):
                canary.append("CL")
                dbapi_connection.close()

        return PoolDialect(), canary

    def _do_test(self, pool_cls, assertion):
        mock_dbapi = MockDBAPI()
        dialect, canary = self._dialect()
        p = pool_cls(creator=mock_dbapi.connect)
        p._dialect = dialect
        # One checkout/checkin cycle, a dispose/recreate, then a second
        # cycle; the recorded hook codes must equal ``assertion``.
        p.connect().close()
        p.dispose()
        p.recreate()
        conn = p.connect()
        conn.close()
        eq_(canary, assertion)

    def test_queue_pool(self):
        self._do_test(pool.QueuePool, ["R", "CL", "R"])

    def test_assertion_pool(self):
        self._do_test(pool.AssertionPool, ["R", "CL", "R"])

    def test_singleton_pool(self):
        self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])

    def test_null_pool(self):
        self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])

    def test_static_pool(self):
        self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
    # Exercises the pool event API. Each _*_fixture builds a QueuePool
    # with a single listener that records its invocations (into a list
    # of strings, or a Mock); the tests assert when and how often each
    # event fires, including across recreate().
    def _first_connect_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def first_connect(*arg, **kw):
            canary.append("first_connect")
        event.listen(p, "first_connect", first_connect)
        return p, canary
    def _connect_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def connect(*arg, **kw):
            canary.append("connect")
        event.listen(p, "connect", connect)
        return p, canary
    def _checkout_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def checkout(*arg, **kw):
            canary.append("checkout")
        event.listen(p, "checkout", checkout)
        return p, canary
    def _checkin_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def checkin(*arg, **kw):
            canary.append("checkin")
        event.listen(p, "checkin", checkin)
        return p, canary
    def _reset_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def reset(*arg, **kw):
            canary.append("reset")
        event.listen(p, "reset", reset)
        return p, canary
    def _invalidate_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "invalidate", canary)
        return p, canary
    def _soft_invalidate_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "soft_invalidate", canary)
        return p, canary
    def _close_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "close", canary)
        return p, canary
    def _detach_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "detach", canary)
        return p, canary
    def _close_detached_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "close_detached", canary)
        return p, canary
    def test_close(self):
        # "close" fires when the pooled connection is actually closed
        # (here via dispose()), not on plain checkin.
        p, canary = self._close_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        rec = c1._connection_record
        c1.close()
        eq_(canary.mock_calls, [])
        p.dispose()
        eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach(self):
        p, canary = self._detach_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        rec = c1._connection_record
        c1.detach()
        eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach_close(self):
        # A detached connection reports "close_detached" (connection
        # only, no record) when closed.
        p, canary = self._close_detached_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        c1.detach()
        c1.close()
        eq_(canary.mock_calls, [call(connection)])
    def test_first_connect_event(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_event_fires_once(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_on_previously_recreated(self):
        # A recreated pool carries the listener and fires its own
        # first_connect independently.
        p, canary = self._first_connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_first_connect_on_subsequently_recreated(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_connect_event(self):
        p, canary = self._connect_event_fixture()
        p.connect()
        eq_(canary, ["connect"])
    def test_connect_event_fires_subsequent(self):
        # "connect" fires for every new DBAPI connection, not just the
        # first.
        p, canary = self._connect_event_fixture()
        c1 = p.connect()  # noqa
        c2 = p.connect()  # noqa
        eq_(canary, ["connect", "connect"])
    def test_connect_on_previously_recreated(self):
        p, canary = self._connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_connect_on_subsequently_recreated(self):
        p, canary = self._connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_checkout_event(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        eq_(canary, ["checkout"])
    def test_checkout_event_fires_subsequent(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkout_event_on_subsequently_recreated(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkin_event(self):
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
    def test_reset_event(self):
        p, canary = self._reset_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["reset"])
    def test_soft_invalidate_event_no_exception(self):
        # Listener args are (dbapi_connection, record, exception);
        # exception is None when invalidate() is called without one.
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate(soft=True)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_soft_invalidate_event_exception(self):
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc, soft=True)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    def test_invalidate_event_no_exception(self):
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate()
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_invalidate_event_exception(self):
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    def test_checkin_event_gc(self):
        # Garbage collection of an unreferenced checked-out connection
        # also triggers a checkin.
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        del c1
        lazy_gc()
        eq_(canary, ["checkin"])
    def test_checkin_event_on_subsequently_recreated(self):
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        p2 = p.recreate()
        c2 = p2.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
        c2.close()
        eq_(canary, ["checkin", "checkin"])
    def test_listen_targets_scope(self):
        # Listeners may target the Pool class, a pool instance, an
        # engine instance or the engine class; all must fire.
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        def listen_four(*args):
            canary.append("listen_four")
        engine = testing_engine(testing.db.url)
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(engine.pool, "connect", listen_two)
        event.listen(engine, "connect", listen_three)
        event.listen(engine.__class__, "connect", listen_four)
        engine.execute(select([1])).close()
        eq_(
            canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
        )
    def test_listen_targets_per_subclass(self):
        """test that listen() called on a subclass remains specific to
        that subclass."""
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(pool.QueuePool, "connect", listen_two)
        event.listen(pool.SingletonThreadPool, "connect", listen_three)
        p1 = pool.QueuePool(creator=MockDBAPI().connect)
        p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
        assert listen_one in p1.dispatch.connect
        assert listen_two in p1.dispatch.connect
        assert listen_three not in p1.dispatch.connect
        assert listen_one in p2.dispatch.connect
        assert listen_two not in p2.dispatch.connect
        assert listen_three in p2.dispatch.connect
        p1.connect()
        eq_(canary, ["listen_one", "listen_two"])
        p2.connect()
        eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
    def test_connect_event_fails_invalidates(self):
        # If a connect listener raises, the half-made connection is
        # discarded; a later successful connect re-runs all listeners.
        fail = False
        def listen_one(conn, rec):
            if fail:
                raise Exception("it failed")
        def listen_two(conn, rec):
            rec.info["important_flag"] = True
        p1 = pool.QueuePool(
            creator=MockDBAPI().connect, pool_size=1, max_overflow=0
        )
        event.listen(p1, "connect", listen_one)
        event.listen(p1, "connect", listen_two)
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.invalidate()
        conn.close()
        fail = True
        assert_raises(Exception, p1.connect)
        fail = False
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.close()
    def teardown(self):
        # TODO: need to get remove() functionality
        # going
        pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
    # test [ticket:2964]
    @testing.requires.timing_intensive
    def test_sync(self):
        # While one thread runs the slow first_connect handler, other
        # threads must block rather than proceed with uninitialized
        # connections: expect exactly one first_connect followed by
        # three connects (pool_size=3).
        pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
        evt = Mock()
        @event.listens_for(pool, "first_connect")
        def slow_first_connect(dbapi_con, rec):
            time.sleep(1)
            evt.first_connect()
        @event.listens_for(pool, "connect")
        def on_connect(dbapi_con, rec):
            evt.connect()
        def checkout():
            for j in range(2):
                c1 = pool.connect()
                time.sleep(0.02)
                c1.close()
                time.sleep(0.02)
        threads = []
        for i in range(5):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        eq_(
            evt.mock_calls,
            [
                call.first_connect(),
                call.connect(),
                call.connect(),
                call.connect(),
            ],
        )
class QueuePoolTest(PoolTestBase):
    def test_queuepool_del(self):
        self._do_testqueuepool(useclose=False)
    def test_queuepool_close(self):
        self._do_testqueuepool(useclose=True)
    def _do_testqueuepool(self, useclose=False):
        # Walks the pool counters (size, checkedin, overflow, checkedout)
        # through checkouts past pool_size and back, returning
        # connections either via close() or via garbage collection
        # depending on ``useclose``.
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
        reaper = testing.engines.ConnectionKiller()
        reaper.add_pool(p)
        def status(pool):
            return (
                pool.size(),
                pool.checkedin(),
                pool.overflow(),
                pool.checkedout(),
            )
        c1 = p.connect()
        self.assert_(status(p) == (3, 0, -2, 1))
        c2 = p.connect()
        self.assert_(status(p) == (3, 0, -1, 2))
        c3 = p.connect()
        self.assert_(status(p) == (3, 0, 0, 3))
        c4 = p.connect()
        self.assert_(status(p) == (3, 0, 1, 4))
        c5 = p.connect()
        self.assert_(status(p) == (3, 0, 2, 5))
        c6 = p.connect()
        self.assert_(status(p) == (3, 0, 3, 6))
        if useclose:
            c4.close()
            c3.close()
            c2.close()
        else:
            c4 = c3 = c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 3, 3, 3))
        if useclose:
            c1.close()
            c5.close()
            c6.close()
        else:
            c1 = c5 = c6 = None
            lazy_gc()
        self.assert_(status(p) == (3, 3, 0, 0))
        c1 = p.connect()
        c2 = p.connect()
        self.assert_(status(p) == (3, 1, 0, 2), status(p))
        if useclose:
            c2.close()
        else:
            c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 2, 0, 1))
        c1.close()
        reaper.assert_all_closed()
    def test_timeout_accessor(self):
        expected_timeout = 123
        p = self._queuepool_fixture(timeout=expected_timeout)
        eq_(p.timeout(), expected_timeout)
    @testing.requires.timing_intensive
    def test_timeout(self):
        # With the pool exhausted, connect() must raise TimeoutError
        # after roughly the configured 2-second timeout.
        p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
        c1 = p.connect() # noqa
        c2 = p.connect() # noqa
        c3 = p.connect() # noqa
        now = time.time()
        assert_raises(tsa.exc.TimeoutError, p.connect)
        assert int(time.time() - now) == 2
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_timeout_race(self):
        # test a race condition where the initial connecting threads all race
        # to queue.Empty, then block on the mutex. each thread consumes a
        # connection as they go in. when the limit is reached, the remaining
        # threads go in, and get TimeoutError; even though they never got to
        # wait for the timeout on queue.get(). the fix involves checking the
        # timeout again within the mutex, and if so, unlocking and throwing
        # them back to the start of do_get()
        dbapi = MockDBAPI()
        p = pool.QueuePool(
            creator=lambda: dbapi.connect(delay=0.05),
            pool_size=2,
            max_overflow=1,
            timeout=3,
        )
        timeouts = []
        def checkout():
            for x in range(1):
                now = time.time()
                try:
                    c1 = p.connect()
                except tsa.exc.TimeoutError:
                    timeouts.append(time.time() - now)
                    continue
                time.sleep(4)
                c1.close()
        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        assert len(timeouts) > 0
        for t in timeouts:
            assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should under 4 seconds,
            # but on a loaded down buildbot it can go up.
            assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
    def _test_overflow(self, thread_count, max_overflow):
        # Hammer the pool from many threads and verify the observed
        # overflow counter never exceeds ``max_overflow``.
        reaper = testing.engines.ConnectionKiller()
        dbapi = MockDBAPI()
        mutex = threading.Lock()
        def creator():
            time.sleep(0.05)
            with mutex:
                return dbapi.connect()
        p = pool.QueuePool(
            creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
        )
        reaper.add_pool(p)
        peaks = []
        def whammy():
            for i in range(10):
                try:
                    con = p.connect()
                    time.sleep(0.005)
                    peaks.append(p.overflow())
                    con.close()
                    del con
                except tsa.exc.TimeoutError:
                    pass
        threads = []
        for i in range(thread_count):
            th = threading.Thread(target=whammy)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        self.assert_(max(peaks) <= max_overflow)
        reaper.assert_all_closed()
    def test_overflow_reset_on_failed_connect(self):
        # A failed creator call must not leave the overflow counter
        # incremented.
        dbapi = Mock()
        def failing_dbapi():
            time.sleep(2)
            raise Exception("connection failed")
        creator = dbapi.connect
        def create():
            return creator()
        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
        c1 = self._with_teardown(p.connect()) # noqa
        c2 = self._with_teardown(p.connect()) # noqa
        c3 = self._with_teardown(p.connect()) # noqa
        eq_(p._overflow, 1)
        creator = failing_dbapi
        assert_raises(Exception, p.connect)
        eq_(p._overflow, 1)
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_hanging_connect_within_overflow(self):
        """test that a single connect() call which is hanging
        does not block other connections from proceeding."""
        dbapi = Mock()
        mutex = threading.Lock()
        def hanging_dbapi():
            time.sleep(2)
            with mutex:
                return dbapi.connect()
        def fast_dbapi():
            with mutex:
                return dbapi.connect()
        # thread-local choice of connector so each test thread picks
        # hanging or fast behavior independently.
        creator = threading.local()
        def create():
            return creator.mock_connector()
        def run_test(name, pool, should_hang):
            if should_hang:
                creator.mock_connector = hanging_dbapi
            else:
                creator.mock_connector = fast_dbapi
            conn = pool.connect()
            conn.operation(name)
            time.sleep(1)
            conn.close()
        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
        threads = [
            threading.Thread(target=run_test, args=("success_one", p, False)),
            threading.Thread(target=run_test, args=("success_two", p, False)),
            threading.Thread(target=run_test, args=("overflow_one", p, True)),
            threading.Thread(target=run_test, args=("overflow_two", p, False)),
            threading.Thread(
                target=run_test, args=("overflow_three", p, False)
            ),
        ]
        for t in threads:
            t.start()
            time.sleep(0.2)
        for t in threads:
            t.join(timeout=join_timeout)
        # the hanging overflow_one finishes last; the others proceeded
        # without waiting on it.
        eq_(
            dbapi.connect().operation.mock_calls,
            [
                call("success_one"),
                call("success_two"),
                call("overflow_two"),
                call("overflow_three"),
                call("overflow_one"),
            ],
        )
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_waiters_handled(self):
        """test that threads waiting for connections are
        handled when the pool is replaced.
        """
        mutex = threading.Lock()
        dbapi = MockDBAPI()
        def creator():
            with mutex:
                return dbapi.connect()
        success = []
        # 2 timeouts x 3 max_overflow settings x 2 waiter threads = 12
        # expected successful checkouts across all configurations.
        for timeout in (None, 30):
            for max_overflow in (0, -1, 3):
                p = pool.QueuePool(
                    creator=creator,
                    pool_size=2,
                    timeout=timeout,
                    max_overflow=max_overflow,
                )
                def waiter(p, timeout, max_overflow):
                    success_key = (timeout, max_overflow)
                    conn = p.connect()
                    success.append(success_key)
                    time.sleep(0.1)
                    conn.close()
                c1 = p.connect() # noqa
                c2 = p.connect()
                threads = []
                for i in range(2):
                    t = threading.Thread(
                        target=waiter, args=(p, timeout, max_overflow)
                    )
                    t.daemon = True
                    t.start()
                    threads.append(t)
                # this sleep makes sure that the
                # two waiter threads hit upon wait()
                # inside the queue, before we invalidate the other
                # two conns
                time.sleep(0.2)
                p._invalidate(c2)
                for t in threads:
                    t.join(join_timeout)
        eq_(len(success), 12, "successes: %s" % success)
    def test_connrec_invalidated_within_checkout_no_race(self):
        """Test that a concurrent ConnectionRecord.invalidate() which
        occurs after the ConnectionFairy has called
        _ConnectionRecord.checkout()
        but before the ConnectionFairy tests "fairy.connection is None"
        will not result in an InvalidRequestError.
        This use case assumes that a listener on the checkout() event
        will be raising DisconnectionError so that a reconnect attempt
        may occur.
        """
        dbapi = MockDBAPI()
        def creator():
            return dbapi.connect()
        p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
        conn = p.connect()
        conn.close()
        _existing_checkout = pool._ConnectionRecord.checkout
        # Wrap the real checkout so the record is invalidated in the
        # window between checkout() and the fairy's None-check.
        @classmethod
        def _decorate_existing_checkout(cls, *arg, **kw):
            fairy = _existing_checkout(*arg, **kw)
            connrec = fairy._connection_record
            connrec.invalidate()
            return fairy
        with patch(
            "sqlalchemy.pool._ConnectionRecord.checkout",
            _decorate_existing_checkout,
        ):
            conn = p.connect()
            is_(conn._connection_record.connection, None)
        conn.close()
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_notify_waiters(self):
        # Waiters blocked on an exhausted pool must be woken when the
        # pool is invalidated, each then creating a fresh connection.
        dbapi = MockDBAPI()
        canary = []
        def creator():
            canary.append(1)
            return dbapi.connect()
        p1 = pool.QueuePool(
            creator=creator, pool_size=1, timeout=None, max_overflow=0
        )
        def waiter(p):
            conn = p.connect()
            canary.append(2)
            time.sleep(0.5)
            conn.close()
        c1 = p1.connect()
        threads = []
        for i in range(5):
            t = threading.Thread(target=waiter, args=(p1,))
            t.start()
            threads.append(t)
        time.sleep(0.5)
        eq_(canary, [1])
        # this also calls invalidate()
        # on c1
        p1._invalidate(c1)
        for t in threads:
            t.join(join_timeout)
        eq_(canary, [1, 1, 2, 2, 2, 2, 2])
    def test_dispose_closes_pooled(self):
        # dispose() closes checked-in connections; a connection checked
        # out at dispose time is returned to the pool afterwards.
        dbapi = MockDBAPI()
        p = pool.QueuePool(
            creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
        )
        c1 = p.connect()
        c2 = p.connect()
        c1_con = c1.connection
        c2_con = c2.connection
        c1.close()
        eq_(c1_con.close.call_count, 0)
        eq_(c2_con.close.call_count, 0)
        p.dispose()
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)
        # currently, if a ConnectionFairy is closed
        # after the pool has been disposed, there's no
        # flag that states it should be invalidated
        # immediately - it just gets returned to the
        # pool normally...
        c2.close()
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)
        # ...and that's the one we'll get back next.
        c3 = p.connect()
        assert c3.connection is c2_con
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_no_overflow(self):
        self._test_overflow(40, 0)
    @testing.requires.threading_with_mock
    @testing.requires.timing_intensive
    def test_max_overflow(self):
        self._test_overflow(40, 5)
    def test_overflow_no_gc(self):
        p = self._queuepool_fixture(pool_size=2, max_overflow=2)
        # disable weakref collection of the
        # underlying connections
        strong_refs = set()
        def _conn():
            c = p.connect()
            strong_refs.add(c.connection)
            return c
        for j in range(5):
            # open 4 conns at a time. each time this
            # will yield two pooled connections + two
            # overflow connections.
            conns = [_conn() for i in range(4)]
            for c in conns:
                c.close()
        # doing that for a total of 5 times yields
        # ten overflow connections closed plus the
        # two pooled connections unclosed.
        eq_(
            set([c.close.call_count for c in strong_refs]),
            set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
        )
    def test_recycle(self):
        # With time mocked, a connection older than ``recycle`` seconds
        # is replaced on the next checkout.
        with patch("sqlalchemy.pool.base.time.time") as mock:
            mock.return_value = 10000
            p = self._queuepool_fixture(
                pool_size=1, max_overflow=0, recycle=30
            )
            c1 = p.connect()
            c_ref = weakref.ref(c1.connection)
            c1.close()
            mock.return_value = 10001
            c2 = p.connect()
            is_(c2.connection, c_ref())
            c2.close()
            mock.return_value = 10035
            c3 = p.connect()
            is_not_(c3.connection, c_ref())
    @testing.requires.timing_intensive
    def test_recycle_on_invalidate(self):
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_ref = weakref.ref(c1.connection)
        c1.close()
        c2 = p.connect()
        is_(c2.connection, c_ref())
        c2_rec = c2._connection_record
        p._invalidate(c2)
        assert c2_rec.connection is None
        c2.close()
        time.sleep(0.5)
        c3 = p.connect()
        is_not_(c3.connection, c_ref())
    @testing.requires.timing_intensive
    def test_recycle_on_soft_invalidate(self):
        # Soft invalidation keeps the current checkout's connection but
        # replaces it (same record) on the next checkout.
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_ref = weakref.ref(c1.connection)
        c1.close()
        c2 = p.connect()
        is_(c2.connection, c_ref())
        c2_rec = c2._connection_record
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        c2.invalidate(soft=True)
        is_(c2_rec.connection, c2.connection)
        c2.close()
        c3 = p.connect()
        is_not_(c3.connection, c_ref())
        is_(c3._connection_record, c2_rec)
        is_(c2_rec.connection, c3.connection)
    def _no_wr_finalize(self):
        """Return a patch context that replaces ``pool._finalize_fairy``
        with a wrapper failing the test if it is ever invoked as a
        weakref callback (i.e. with ``fairy=None``) rather than via an
        explicit close."""
        finalize_fairy = pool._finalize_fairy
        def assert_no_wr_callback(
            connection, connection_record, pool, ref, echo, fairy=None
        ):
            if fairy is None:
                raise AssertionError(
                    "finalize fairy was called as a weakref callback"
                )
            return finalize_fairy(
                connection, connection_record, pool, ref, echo, fairy
            )
        return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
    def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
        """Assert overflow/checkedout bookkeeping stays correct when the
        pooled connection fails on reconnect and then recovers.

        p is QueuePool with size=1, max_overflow=2, and one connection
        in the pool that will need to reconnect when next used (either
        due to recycle or invalidate).
        """
        with self._no_wr_finalize():
            eq_(p.checkedout(), 0)
            eq_(p._overflow, 0)
            dbapi.shutdown(True)
            # checkout fails while the DB is "down"...
            assert_raises_context_ok(Exception, p.connect)
            # ...but the failed checkout must not leak into the counters
            eq_(p._overflow, 0)
            eq_(p.checkedout(), 0)  # and not 1
            dbapi.shutdown(False)
            c1 = self._with_teardown(p.connect())  # noqa
            assert p._pool.empty()  # poolsize is one, so we're empty OK
            c2 = self._with_teardown(p.connect())  # noqa
            eq_(p._overflow, 1)  # and not 2
            # this hangs if p._overflow is 2
            c3 = self._with_teardown(p.connect())
            c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
    @testing.requires.timing_intensive
    def test_connect_handler_not_called_for_recycled(self):
        """test [ticket:3497]

        The "connect" event must fire only for a truly new DBAPI
        connection, exactly once, when an invalidated record reconnects.
        """
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
        canary = Mock()
        c1 = p.connect()
        c2 = p.connect()
        c1.close()
        c2.close()
        dbapi.shutdown(True)
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        bad = p.connect()
        p._invalidate(bad)
        bad.close()
        assert p._invalidate_time
        event.listen(p, "connect", canary.connect)
        event.listen(p, "checkout", canary.checkout)
        # the DB is still down, so this checkout fails
        assert_raises(Exception, p.connect)
        # prune records whose DBAPI connection was lost to the shutdown
        p._pool.queue = collections.deque(
            [c for c in p._pool.queue if c.connection is not None]
        )
        dbapi.shutdown(False)
        c = p.connect()
        c.close()
        # exactly one "connect" and one "checkout" for the reconnect
        eq_(
            canary.mock_calls,
            [call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
        )
    @testing.requires.timing_intensive
    def test_connect_checkout_handler_always_gets_info(self):
        """test [ticket:3497]

        The "checkout" handler must always see the ``info`` dict that
        the "connect" handler populated, including across reconnects.
        """
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
        c1 = p.connect()
        c2 = p.connect()
        c1.close()
        c2.close()
        dbapi.shutdown(True)
        # ensure pool invalidate time will be later than starttime
        # for ConnectionRecord objects above
        time.sleep(0.1)
        bad = p.connect()
        p._invalidate(bad)
        bad.close()
        assert p._invalidate_time
        @event.listens_for(p, "connect")
        def connect(conn, conn_rec):
            conn_rec.info["x"] = True
        @event.listens_for(p, "checkout")
        def checkout(conn, conn_rec, conn_f):
            # fails if "connect" did not run first for this record
            assert "x" in conn_rec.info
        # the DB is still down, so this checkout fails
        assert_raises(Exception, p.connect)
        # prune records whose DBAPI connection was lost to the shutdown
        p._pool.queue = collections.deque(
            [c for c in p._pool.queue if c.connection is not None]
        )
        dbapi.shutdown(False)
        c = p.connect()
        c.close()
    def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
        """A DisconnectionError raised from the "checkout" event goes
        through the standard reconnect-cleanup path as well."""
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
        c1 = p.connect()
        c1.close()
        @event.listens_for(p, "checkout")
        def handle_checkout_event(dbapi_con, con_record, con_proxy):
            # simulate a dead connection detected at checkout time
            if dbapi.is_shutdown:
                raise tsa.exc.DisconnectionError()
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)
    @testing.requires.predictable_gc
    def test_userspace_disconnectionerror_weakref_finalizer(self):
        """A connection invalidated via a user-raised DisconnectionError
        is closed outright when its fairy is garbage collected, with no
        spurious reset (rollback) issued on the dead connection."""
        dbapi, pool = self._queuepool_dbapi_fixture(
            pool_size=1, max_overflow=2
        )
        @event.listens_for(pool, "checkout")
        def handle_checkout_event(dbapi_con, con_record, con_proxy):
            if getattr(dbapi_con, "boom") == "yes":
                raise tsa.exc.DisconnectionError()
        conn = pool.connect()
        old_dbapi_conn = conn.connection
        conn.close()
        # checkin performed the usual reset-on-return rollback
        eq_(old_dbapi_conn.mock_calls, [call.rollback()])
        old_dbapi_conn.boom = "yes"
        conn = pool.connect()
        dbapi_conn = conn.connection
        del conn
        gc_collect()
        # new connection was reset on return appropriately
        eq_(dbapi_conn.mock_calls, [call.rollback()])
        # old connection was just closed - did not get an
        # erroneous reset on return
        eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
    @testing.requires.timing_intensive
    def test_recycle_pool_no_race(self):
        """Concurrent invalidations with slow (0.5s) connection closes
        must not cause the engine to recreate its pool repeatedly;
        TrackQueuePool records every pool instance ever constructed and
        at most 2 are allowed."""
        def slow_close():
            slow_closing_connection._slow_close()
            time.sleep(0.5)
        slow_closing_connection = Mock()
        slow_closing_connection.connect.return_value.close = slow_close
        class Error(Exception):
            pass
        dialect = Mock()
        # every error counts as a disconnect, forcing invalidation
        dialect.is_disconnect = lambda *arg, **kw: True
        dialect.dbapi.Error = Error
        pools = []
        class TrackQueuePool(pool.QueuePool):
            def __init__(self, *arg, **kw):
                pools.append(self)
                super(TrackQueuePool, self).__init__(*arg, **kw)
        def creator():
            return slow_closing_connection.connect()
        p1 = TrackQueuePool(creator=creator, pool_size=20)
        from sqlalchemy import create_engine
        eng = create_engine(testing.db.url, pool=p1, _initialize=False)
        eng.dialect = dialect
        # 15 total connections
        conns = [eng.connect() for i in range(15)]
        # return 7 back to the pool (indices 3 through 9; an earlier
        # comment said 8, but conns[3:10] is seven connections)
        for conn in conns[3:10]:
            conn.close()
        def attempt(conn):
            time.sleep(random.random())
            try:
                conn._handle_dbapi_exception(
                    Error(), "statement", {}, Mock(), Mock()
                )
            except tsa.exc.DBAPIError:
                pass
        # run an error + invalidate operation from threads; the loop
        # covers all 15 connections, including the 7 returned above
        threads = []
        for conn in conns:
            t = threading.Thread(target=attempt, args=(conn,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        # return all 15 connections to the pool
        for conn in conns:
            conn.close()
        # re-open 15 total connections
        conns = [eng.connect() for i in range(15)]
        # 15 connections have been fully closed due to invalidate
        assert slow_closing_connection._slow_close.call_count == 15
        # 15 initial connections + 15 reconnections
        assert slow_closing_connection.connect.call_count == 30
        assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
    """Tests for checkin reset behavior: rollback, commit, None, and
    delegation to a ``_reset_agent``."""
    def _fixture(self, **kw):
        # returns (mock dbapi, QueuePool) wired together; every
        # dbapi.connect("foo.db") call yields the same Mock connection
        dbapi = Mock()
        return (
            dbapi,
            pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
        )
    def test_plain_rollback(self):
        """reset_on_return='rollback' rolls back at checkin."""
        dbapi, p = self._fixture(reset_on_return="rollback")
        c1 = p.connect()
        c1.close()
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
    def test_plain_commit(self):
        """reset_on_return='commit' commits at checkin."""
        dbapi, p = self._fixture(reset_on_return="commit")
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called
    def test_plain_none(self):
        """reset_on_return=None performs no reset at checkin."""
        dbapi, p = self._fixture(reset_on_return=None)
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
    def test_agent_rollback(self):
        """A _reset_agent's rollback() replaces the DBAPI-level reset;
        once the agent is gone, the plain rollback returns."""
        dbapi, p = self._fixture(reset_on_return="rollback")
        class Agent(object):
            def __init__(self, conn):
                self.conn = conn
            def rollback(self):
                self.conn.special_rollback()
            def commit(self):
                self.conn.special_commit()
        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        # agent's rollback ran instead of the DBAPI-level reset
        assert dbapi.connect().special_rollback.called
        assert not dbapi.connect().special_commit.called
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
        c1 = p.connect()
        c1.close()
        # no agent on the second checkout: plain rollback this time
        eq_(dbapi.connect().special_rollback.call_count, 1)
        eq_(dbapi.connect().special_commit.call_count, 0)
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
    def test_agent_commit(self):
        """Same as test_agent_rollback, for reset_on_return='commit'."""
        dbapi, p = self._fixture(reset_on_return="commit")
        class Agent(object):
            def __init__(self, conn):
                self.conn = conn
            def rollback(self):
                self.conn.special_rollback()
            def commit(self):
                self.conn.special_commit()
        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        assert not dbapi.connect().special_rollback.called
        assert dbapi.connect().special_commit.called
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
        c1 = p.connect()
        c1.close()
        eq_(dbapi.connect().special_rollback.call_count, 0)
        eq_(dbapi.connect().special_commit.call_count, 1)
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called
    def test_reset_agent_disconnect(self):
        """An agent that invalidates and raises during reset must not
        produce a double-checkin warning."""
        dbapi, p = self._fixture(reset_on_return="rollback")
        class Agent(object):
            def __init__(self, conn):
                self.conn = conn
            def rollback(self):
                # simulate a disconnect detected during the reset
                p._invalidate(self.conn)
                raise Exception("hi")
            def commit(self):
                self.conn.commit()
        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        # no warning raised. We know it would warn due to
        # QueuePoolTest.test_no_double_checkin
class SingletonThreadPoolTest(PoolTestBase):
    """Tests for SingletonThreadPool, which maintains one connection
    per thread (used for SQLite memory databases)."""
    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)
    # TODO: the SingletonThreadPool cleanup method
    # has an unfixed race condition within the "cleanup" system that
    # leads to this test being off by one connection under load; in any
    # case, this connection will be closed once it is garbage collected.
    # this pool is not a production-level pool and is only used for the
    # SQLite "memory" connection, and is not very useful under actual
    # multi-threaded conditions
    # @testing.requires.threading_with_mock
    # def test_cleanup_no_gc(self):
    #     self._test_cleanup(True)
    def _test_cleanup(self, strong_refs):
        """test that the pool's connections are OK after cleanup() has
        been called.

        Runs 10 threads x 10 checkouts each against pool_size=3, then
        asserts the pool retains 3 (or, per the race above, 4) conns.
        """
        dbapi = MockDBAPI()
        lock = threading.Lock()
        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()
        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        if strong_refs:
            # keep strong references so connections can't be GC'ed
            sr = set()
            def _conn():
                c = p.connect()
                sr.add(c.connection)
                return c
        else:
            def _conn():
                return p.connect()
        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(0.1)
        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        lp = len(p._all_conns)
        # 4 allowed instead of 3 due to the race noted above
        is_true(3 <= lp <= 4)
        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)
    def test_no_rollback_from_nested_connections(self):
        """Nested connect() in one thread returns the same fairy; only
        the outermost close performs the rollback."""
        dbapi = MockDBAPI()
        lock = threading.Lock()
        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()
        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        c1 = p.connect()
        mock_conn = c1.connection
        c2 = p.connect()
        is_(c1, c2)
        c2.close()
        # inner close: no reset yet
        eq_(mock_conn.mock_calls, [])
        c1.close()
        # outer close triggers the reset-on-return rollback
        eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
    """AssertionPool permits at most one checked-out connection."""
    def test_connect_error(self):
        """A second concurrent checkout raises AssertionError."""
        dbapi = MockDBAPI()
        assertion_pool = pool.AssertionPool(
            creator=lambda: dbapi.connect("foo.db")
        )
        held = assertion_pool.connect()  # noqa
        assert_raises(AssertionError, assertion_pool.connect)
    def test_connect_multiple(self):
        """Sequential checkouts are fine; overlapping ones are not."""
        dbapi = MockDBAPI()
        assertion_pool = pool.AssertionPool(
            creator=lambda: dbapi.connect("foo.db")
        )
        first = assertion_pool.connect()
        first.close()
        second = assertion_pool.connect()
        second.close()
        third = assertion_pool.connect()  # noqa
        assert_raises(AssertionError, assertion_pool.connect)
class NullPoolTest(PoolTestBase):
    """NullPool performs no pooling: every checkout dials anew."""
    def test_reconnect(self):
        fixture_dbapi = MockDBAPI()
        null_pool = pool.NullPool(
            creator=lambda: fixture_dbapi.connect("foo.db")
        )
        fairy = null_pool.connect()
        fairy.close()
        fairy = None
        fairy = null_pool.connect()
        fairy.invalidate()
        fairy = None
        fairy = null_pool.connect()
        # multiple distinct connect() calls were made
        fixture_dbapi.connect.assert_has_calls(
            [call("foo.db"), call("foo.db")], any_order=True
        )
class StaticPoolTest(PoolTestBase):
    """StaticPool holds exactly one connection for all checkouts."""
    def test_recreate(self):
        """recreate() carries the creator over to the new pool."""
        fixture_dbapi = MockDBAPI()
        def creator():
            return fixture_dbapi.connect("foo.db")
        static_pool = pool.StaticPool(creator)
        clone = static_pool.recreate()
        assert static_pool._creator is clone._creator
class CreatorCompatibilityTest(PoolTestBase):
    """Tests that the pool accepts both zero-arg and one-arg
    (connection-record) creator callables, including when ``_creator``
    is monkeypatched after the fact.

    Fix: the original tests wrapped ``conn = creator()`` in
    ``try/finally: conn.close()``; if the creator raised, ``conn`` was
    never bound and the ``finally`` block raised NameError, masking the
    real failure.  The connection is now acquired first and closed
    afterwards.
    """
    def test_creator_callable_outside_noarg(self):
        """The stored _creator is callable with no arguments."""
        e = testing_engine()
        creator = e.pool._creator
        conn = creator()
        conn.close()
    def test_creator_callable_outside_witharg(self):
        """The stored _creator also accepts a connection-record arg."""
        e = testing_engine()
        creator = e.pool._creator
        conn = creator(Mock())
        conn.close()
    def test_creator_patching_arg_to_noarg(self):
        """The 'should_wrap_creator' status switches dynamically when
        _creator is monkeypatched from two-arg to zero-arg form."""
        e = testing_engine()
        creator = e.pool._creator
        # the creator is the two-arg form
        conn = creator(Mock())
        conn.close()
        def mock_create():
            return creator()
        conn = e.connect()
        conn.invalidate()
        conn.close()
        # test that the 'should_wrap_creator' status
        # will dynamically switch if the _creator is monkeypatched.
        # patch it with a zero-arg form
        with patch.object(e.pool, "_creator", mock_create):
            conn = e.connect()
            conn.invalidate()
            conn.close()
        conn = e.connect()
        conn.close()
| {
"content_hash": "fa43634bf033e5bc7e0e21edefc757cb",
"timestamp": "",
"source": "github",
"line_count": 1955,
"max_line_length": 79,
"avg_line_length": 27.90843989769821,
"alnum_prop": 0.5459210791591064,
"repo_name": "graingert/sqlalchemy",
"id": "72e0fa1865a53355ce9ca927ef016156a3b37acb",
"size": "54561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/engine/test_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49149"
},
{
"name": "Python",
"bytes": "11845913"
}
],
"symlink_target": ""
} |
"""
.. module:: layer
:platform: Windows, Linux
   :synopsis: Class that contains feature service layer information.
.. moduleauthor:: Esri
"""
from .._abstract import abstract
from ..security import security
import types
from ..common import filters
from ..common.geometry import SpatialReference
from ..common.general import _date_handler, _unicode_convert, Feature
from ..common.spatial import scratchFolder, scratchGDB, json_to_featureclass
from ..common.spatial import get_OID_field, get_records_with_attachments
from ..common.spatial import create_feature_layer, merge_feature_class
from ..common.spatial import featureclass_to_json, create_feature_class
from ..common.spatial import get_attachment_data
from ..common.general import FeatureSet
from ..hostedservice import AdminFeatureServiceLayer
import featureservice
import os
import json
import math
import urlparse
import mimetypes
import uuid
from re import search
from urlparse import urlparse
########################################################################
class FeatureLayer(abstract.BaseAGOLClass):
    """
    This contains information about a feature service's layer.
    """
    # Each "_"-prefixed attribute below caches one field of the layer's
    # JSON metadata; __init() populates them and the same-named public
    # properties expose them lazily (fetching on first access).
    _objectIdField = None
    _allowGeometryUpdates = None
    _globalIdField = None
    _token_url = None
    _currentVersion = None
    _id = None
    _name = None
    _type = None
    _description = None
    _definitionExpression = None
    _geometryType = None
    _hasZ = None
    _hasM = None
    _copyrightText = None
    _parentLayer = None
    _subLayers = None
    _minScale = None
    _maxScale = None
    _effectiveMinScale = None
    _effectiveMaxScale = None
    _defaultVisibility = None
    _extent = None
    _timeInfo = None
    _drawingInfo = None
    _hasAttachments = None
    _htmlPopupType = None
    _displayField = None
    _typeIdField = None
    _fields = None
    _types = None # sub-types
    _relationships = None
    _maxRecordCount = None
    _canModifyLayer = None
    _supportsValidateSql = None
    _supportsCoordinatesQuantization = None
    _supportsStatistics = None
    _supportsAdvancedQueries = None
    _hasLabels = None
    _canScaleSymbols = None
    _capabilities = None
    _supportedQueryFormats = None
    _isDataVersioned = None
    _ownershipBasedAccessControlForFeatures = None
    _useStandardizedQueries = None
    _templates = None
    _indexes = None
    _hasStaticData = None
    _supportsRollbackOnFailureParameter = None
    _advancedQueryCapabilities = None
    _editingInfo = None
    _proxy_url = None
    _proxy_port = None
    _securityHandler = None
    _supportsCalculate = None
    _supportsAttachmentsByUploadId = None
    _editFieldsInfo = None
    _serverURL = None
    # NOTE(review): the next two lines duplicate declarations made
    # earlier in this class body (harmless, but should be removed)
    _supportsValidateSql = None
    _supportsCoordinatesQuantization = None
    #----------------------------------------------------------------------
    def __init__(self, url,
                 securityHandler=None,
                 initialize=False,
                 proxy_url=None,
                 proxy_port=None):
        """Constructor.

        Inputs:
           url - string - URL of the feature layer REST endpoint
           securityHandler - optional auth handler; honored only if it
               derives from abstract.BaseSecurityHandler
           initialize - boolean - if True, fetch the layer metadata now
               instead of lazily on first property access
           proxy_url - string - optional proxy host
           proxy_port - integer - optional proxy port
        """
        self._url = url
        self._proxy_port = proxy_port
        self._proxy_url = proxy_url
        if securityHandler is not None and \
           isinstance(securityHandler, abstract.BaseSecurityHandler):
            self._securityHandler = securityHandler
            if not securityHandler.referer_url is None:
                self._referer_url = securityHandler.referer_url
        if initialize:
            self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the service """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in Feature Layer."
self._parentLayer = featureservice.FeatureService(
url=os.path.dirname(self._url),
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
    #----------------------------------------------------------------------
    def refresh(self):
        """refreshes all the properties of the service"""
        # re-runs the metadata fetch, overwriting all cached values
        self.__init()
    #----------------------------------------------------------------------
    def __str__(self):
        """Return the layer's public properties serialized as JSON text.

        dict(self) drives __iter__, so this may trigger a lazy fetch of
        the metadata; dates are serialized via _date_handler.
        """
        return json.dumps(dict(self), default=_date_handler)
    #----------------------------------------------------------------------
    def __iter__(self):
        """ iterator generator for public values/properties
        It only returns the properties that are public.

        Yields (name, value) pairs for every public, non-callable
        attribute found via dir(); note that each getattr() on a lazy
        property may trigger a metadata fetch.
        """
        attributes = [attr for attr in dir(self)
                      if not attr.startswith('__') and \
                      not attr.startswith('_') and \
                      not isinstance(getattr(self, attr), (types.MethodType,
                                          types.BuiltinFunctionType,
                                          types.BuiltinMethodType))
                      ]
        for att in attributes:
            yield (att, getattr(self, att))
    #----------------------------------------------------------------------
    @property
    def url(self):
        """Return the REST endpoint URL for the feature layer (read-only)."""
        return self._url
    #----------------------------------------------------------------------
    @property
    def administration(self):
        """returns the hostservice object to manage the back-end functions"""
        url = self._url
        # splice "admin/" into the URL just after the "/rest/" segment,
        # e.g. .../rest/services/... -> .../rest/admin/services/...
        # NOTE(review): search() returns None when "/rest/" is absent,
        # which would raise AttributeError here — confirm all layer URLs
        # contain "/rest/"
        res = search("/rest/", url).span()
        addText = "admin/"
        part1 = url[:res[1]]
        part2 = url[res[1]:]
        adminURL = "%s%s%s" % (part1, addText, part2)
        res = AdminFeatureServiceLayer(url=adminURL,
                                       securityHandler=self._securityHandler,
                                       proxy_url=self._proxy_url,
                                       proxy_port=self._proxy_port,
                                       initialize=True)
        return res
#----------------------------------------------------------------------
@property
def supportsValidateSql(self):
""" returns the supports calculate values """
if self._supportsValidateSql is None:
self.__init()
return self._supportsValidateSql
#----------------------------------------------------------------------
@property
def supportsCoordinatesQuantization(self):
""" returns the supports calculate values """
if self._supportsCoordinatesQuantization is None:
self.__init()
return self._supportsCoordinatesQuantization
#----------------------------------------------------------------------
@property
def supportsCalculate(self):
""" returns the supports calculate values """
if self._supportsCalculate is None:
self.__init()
return self._supportsCalculate
#----------------------------------------------------------------------
@property
def editFieldsInfo(self):
""" returns edit field info """
if self._editFieldsInfo is None:
self.__init()
return self._editFieldsInfo
#----------------------------------------------------------------------
@property
def supportsAttachmentsByUploadId(self):
""" returns is supports attachments by uploads id """
if self._supportsAttachmentsByUploadId is None:
self.__init()
return self._supportsAttachmentsByUploadId
#----------------------------------------------------------------------
@property
def editingInfo(self):
""" returns the edit information """
if self._editingInfo is None:
self.__init()
return self._editingInfo
#----------------------------------------------------------------------
@property
def advancedQueryCapabilities(self):
""" returns the advanced query capabilities """
if self._advancedQueryCapabilities is None:
self.__init()
return self._advancedQueryCapabilities
#----------------------------------------------------------------------
@property
def supportsRollbackOnFailureParameter(self):
""" returns if rollback on failure supported """
if self._supportsRollbackOnFailureParameter is None:
self.__init()
return self._supportsRollbackOnFailureParameter
#----------------------------------------------------------------------
@property
def hasStaticData(self):
"""boolean T/F if static data is present """
if self._hasStaticData is None:
self.__init()
return self._hasStaticData
#----------------------------------------------------------------------
@property
def indexes(self):
"""gets the indexes"""
if self._indexes is None:
self.__init()
return self._indexes
#----------------------------------------------------------------------
@property
def templates(self):
""" gets the template """
if self._templates is None:
self.__init()
return self._templates
#----------------------------------------------------------------------
@property
def allowGeometryUpdates(self):
""" returns boolean if geometry updates are allowed """
if self._allowGeometryUpdates is None:
self.__init()
return self._allowGeometryUpdates
#----------------------------------------------------------------------
@property
def globalIdField(self):
""" returns the global id field """
if self._globalIdField is None:
self.__init()
return self._globalIdField
#----------------------------------------------------------------------
@property
def objectIdField(self):
if self._objectIdField is None:
self.__init()
return self._objectIdField
#----------------------------------------------------------------------
@property
def currentVersion(self):
""" returns the current version """
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def id(self):
""" returns the id """
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def name(self):
""" returns the name """
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def type(self):
""" returns the type """
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def description(self):
""" returns the layer's description """
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def definitionExpression(self):
"""returns the definitionExpression"""
if self._definitionExpression is None:
self.__init()
return self._definitionExpression
#----------------------------------------------------------------------
@property
def geometryType(self):
"""returns the geometry type"""
if self._geometryType is None:
self.__init()
return self._geometryType
#----------------------------------------------------------------------
@property
def hasZ(self):
""" returns if it has a Z value or not """
if self._hasZ is None:
self.__init()
return self._hasZ
#----------------------------------------------------------------------
@property
def hasM(self):
""" returns if it has a m value or not """
if self._hasM is None:
self.__init()
return self._hasM
#----------------------------------------------------------------------
@property
def copyrightText(self):
""" returns the copyright text """
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def parentLayer(self):
""" returns information about the parent """
if self._parentLayer is None:
self.__init()
return self._parentLayer
#----------------------------------------------------------------------
@property
def subLayers(self):
""" returns sublayers for layer """
if self._subLayers is None:
self.__init()
return self._subLayers
#----------------------------------------------------------------------
@property
def minScale(self):
""" minimum scale layer will show """
if self._minScale is None:
self.__init()
return self._minScale
@property
def maxScale(self):
""" sets the max scale """
if self._maxScale is None:
self.__init()
return self._maxScale
@property
def effectiveMinScale(self):
""" returns the effective minimum scale value """
if self._effectiveMinScale is None:
self.__init()
return self._effectiveMinScale
@property
def effectiveMaxScale(self):
""" returns the effective maximum scale value """
if self._effectiveMaxScale is None:
self.__init()
return self._effectiveMaxScale
@property
def defaultVisibility(self):
""" returns the default visibility of the layer """
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
@property
def extent(self):
""" returns the extent """
if self._extent is None:
self.__init()
return self._extent
@property
def timeInfo(self):
""" returns the time information about the layer """
if self._timeInfo is None:
self.__init()
return self._timeInfo
@property
def drawingInfo(self):
""" returns the symbol information about the layer """
if self._drawingInfo is None:
self.__init()
return self._drawingInfo
@property
def hasAttachments(self):
""" boolean that tells if attachments are associated with layer """
if self._hasAttachments is None:
self.__init()
return self._hasAttachments
@property
def htmlPopupType(self):
""" returns the popup type """
if self._htmlPopupType is None:
self.__init()
return self._htmlPopupType
@property
def displayField(self):
""" returns the primary display field """
if self._displayField is None:
self.__init()
return self._displayField
@property
def typeIdField(self):
""" returns the type Id field """
if self._typeIdField is None:
self.__init()
return self._typeIdField
@property
def fields(self):
""" returns the layer's fields """
if self._fields is None:
self.__init()
return self._fields
@property
def types(self):
""" returns the types """
if self._types is None:
self.__init()
return self._types
@property
def relationships(self):
""" returns the relationships for the layer """
if self._relationships is None:
self.__init()
return self._relationships
    @property
    def maxRecordCount(self):
        """ returns the maximum returned records """
        if self._maxRecordCount is None:
            self.__init()
        if self._maxRecordCount is None:
            # service metadata omitted the value; fall back to 1000
            self._maxRecordCount = 1000
        return self._maxRecordCount
@property
def canModifyLayer(self):
""" returns boolean to say if layer can be modified """
if self._canModifyLayer is None:
self.__init()
return self._canModifyLayer
@property
def supportsStatistics(self):
""" boolean to if supports statistics """
if self._supportsStatistics is None:
self.__init()
return self._supportsStatistics
@property
def supportsAdvancedQueries(self):
""" boolean value if advanced queries is supported """
if self._supportsAdvancedQueries is None:
self.__init()
return self._supportsAdvancedQueries
@property
def hasLabels(self):
""" returns if layer has labels on or not """
if self._hasLabels is None:
self.__init()
return self._hasLabels
@property
def canScaleSymbols(self):
""" states if symbols can scale """
if self._canScaleSymbols is None:
self.__init()
return self._canScaleSymbols
@property
def capabilities(self):
""" operations that can be performed on layer """
if self._capabilities is None:
self.__init()
return self._capabilities
@property
def supportedQueryFormats(self):
""" returns supported query formats """
if self._supportedQueryFormats is None:
self.__init()
return self._supportedQueryFormats
@property
def isDataVersioned(self):
""" returns boolean if data is in version control """
if self._isDataVersioned is None:
self.__init()
return self._isDataVersioned
@property
def ownershipBasedAccessControlForFeatures(self):
""" returns value for owernship based access control """
if self._ownershipBasedAccessControlForFeatures is None:
self.__init()
return self._ownershipBasedAccessControlForFeatures
@property
def useStandardizedQueries(self):
""" returns value if standardized queries can be used """
if self._useStandardizedQueries is None:
self.__init()
return self._useStandardizedQueries
    #----------------------------------------------------------------------
    @property
    def securityHandler(self):
        """Return the security handler used to authenticate requests."""
        return self._securityHandler
    #----------------------------------------------------------------------
    @securityHandler.setter
    def securityHandler(self, value):
        """ sets the security handler

        AGOL token handlers also populate token/username/password/
        token_url; OAuth handlers populate the token only.
        """
        if isinstance(value, abstract.BaseSecurityHandler):
            if isinstance(value, security.AGOLTokenSecurityHandler):
                self._securityHandler = value
                self._token = value.token
                self._username = value.username
                self._password = value._password
                self._token_url = value.token_url
            elif isinstance(value, security.OAuthSecurityHandler):
                self._token = value.token
                self._securityHandler = value
            else:
                # NOTE(review): other BaseSecurityHandler subclasses and
                # non-handler values are silently ignored (handler not
                # stored) — confirm this is intended
                pass
#----------------------------------------------------------------------
def addAttachment(self, oid, file_path):
""" Adds an attachment to a feature service
Input:
oid - string - OBJECTID value to add attachment to
file_path - string - path to file
Output:
JSON Repsonse
"""
if self.hasAttachments == True:
attachURL = self._url + "/%s/addAttachment" % oid
params = {'f':'json'}
parsed = urlparse(attachURL)
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
securityHandler=self._securityHandler,
files=files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return self._unicode_convert(res)
else:
return "Attachments are not supported for this feature service."
#----------------------------------------------------------------------
def deleteAttachment(self, oid, attachment_id):
    """ removes a single attachment from a feature service feature
        Input:
          oid - integer or string - id of feature
          attachment_id - integer - id of attachment to erase
        Output:
          JSON response
    """
    params = {"f": "json",
              "attachmentIds": "%s" % attachment_id}
    return self._do_post(self._url + "/%s/deleteAttachments" % oid,
                         params,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateAttachment(self, oid, attachment_id, file_path):
    """ replaces an existing attachment with a new file
        Inputs:
          oid - string/integer - Unique record ID
          attachment_id - integer - Unique attachment identifier
          file_path - string - path to the replacement file
        Output:
          JSON response
    """
    url = self._url + "/%s/updateAttachment" % oid
    params = {"f": "json",
              "attachmentId": "%s" % attachment_id}
    parsed = urlparse(url)
    # multipart file entry: (form field name, local path, file name)
    attachment = ('attachment', file_path, os.path.basename(file_path))
    res = self._post_multipart(host=parsed.hostname,
                               selector=parsed.path,
                               files=[attachment],
                               port=parsed.port,
                               fields=params,
                               securityHandler=self._securityHandler,
                               ssl=parsed.scheme.lower() == 'https',
                               proxy_port=self._proxy_port,
                               proxy_url=self._proxy_url)
    return self._unicode_convert(res)
#----------------------------------------------------------------------
def listAttachments(self, oid):
    """ returns every attachment registered for the feature with the
        given OBJECTID """
    return self._do_get(self._url + "/%s/attachments" % oid,
                        {"f": "json"},
                        securityHandler=self._securityHandler,
                        proxy_port=self._proxy_port,
                        proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def create_fc_template(self, out_path, out_name):
    """creates an empty featureclass template on local disk that mirrors
    this layer's geometry type, spatial reference and fields"""
    srs_wkid = self.parentLayer.spatialReference['wkid']
    return create_feature_class(out_path,
                                out_name,
                                self.geometryType,
                                srs_wkid,
                                self.fields,
                                self.objectIdField)
def create_feature_template(self):
    """creates an empty feature template.

    Returns a Feature whose attribute dictionary contains every editable
    field set to '' (the ObjectID and GlobalID system fields are
    skipped) and whose geometry is empty.
    """
    # BUGFIX: the original loop contained a stray no-op statement
    # (`self._globalIdField` evaluated and discarded on every iteration)
    skip_fields = (self._objectIdField, self._globalIdField)
    att = {fld['name']: '' for fld in self.fields
           if fld['name'] not in skip_fields}
    return Feature({'attributes': att, 'geometry': ''})
#----------------------------------------------------------------------
def query(self,
          where="1=1",
          out_fields="*",
          timeFilter=None,
          geometryFilter=None,
          returnGeometry=True,
          returnIDsOnly=False,
          returnCountOnly=False,
          returnFeatureClass=False,
          out_fc=None):
    """ queries a feature service based on a sql statement
        Inputs:
           where - the selection sql statement
           out_fields - the attribute fields to return
           timeFilter - a TimeFilter object where either the start time
                        or start and end time are defined to limit the
                        search results for a given time.  The values in
                        the timeFilter should be as UTC timestamps in
                        milliseconds.  No checking occurs to see if they
                        are in the right format.
           geometryFilter - a GeometryFilter object to parse down a given
                            query by another spatial dataset.
           returnGeometry - true means a geometry will be returned,
                            else just the attributes
           returnIDsOnly - false is default.  True means only OBJECTIDs
                           will be returned
           returnCountOnly - if True, then an integer is returned only
                             based on the sql statement
           returnFeatureClass - Default False.  If true, query will be
                                returned as feature class
           out_fc - only valid if returnFeatureClass is set to True.
                    Output location of query.
        Output:
           A list of Feature Objects (default) or a path to the output
           featureclass if returnFeatureClass is set to True.
        Raises:
           ValueError - when the server response contains an 'error' key.
    """
    params = {"f": "json",
              "where": where,
              "outFields": out_fields,
              "returnGeometry": returnGeometry,
              "returnIdsOnly": returnIDsOnly,
              "returnCountOnly": returnCountOnly,
              }
    # optional filters are only added when supplied and of the right type
    if timeFilter is not None and \
       isinstance(timeFilter, filters.TimeFilter):
        params['time'] = timeFilter.filter
    if geometryFilter is not None and \
       isinstance(geometryFilter, filters.GeometryFilter):
        gf = geometryFilter.filter
        params['geometry'] = gf['geometry']
        params['geometryType'] = gf['geometryType']
        params['spatialRelationship'] = gf['spatialRel']
        params['inSR'] = gf['inSR']
    fURL = self._url + "/query"
    results = self._do_get(fURL, params,
                           securityHandler=self._securityHandler,
                           proxy_port=self._proxy_port,
                           proxy_url=self._proxy_url)
    if 'error' in results:
        raise ValueError(results)
    if returnCountOnly or returnIDsOnly:
        # raw JSON dictionary (count or list of objectIds)
        return results
    if returnFeatureClass:
        json_text = json.dumps(results)
        # BUGFIX: uuid4().hex works on Python 2 and 3 (get_hex() is 2.x only)
        temp = scratchFolder() + os.sep + uuid.uuid4().hex + ".json"
        with open(temp, 'wb') as writer:
            writer.write(json_text)
            writer.flush()
        fc = json_to_featureclass(json_file=temp,
                                  out_fc=out_fc)
        os.remove(temp)
        return fc
    # NOTE: the unreachable trailing `return` after this if/else was removed
    return FeatureSet.fromJSON(json.dumps(results))
#----------------------------------------------------------------------
def query_related_records(self,
                          objectIds,
                          relationshipId,
                          outFields="*",
                          definitionExpression=None,
                          returnGeometry=True,
                          maxAllowableOffset=None,
                          geometryPrecision=None,
                          outWKID=None,
                          gdbVersion=None,
                          returnZ=False,
                          returnM=False):
    """
    The Query operation is performed on a feature service layer
    resource. The result of this operation are feature sets grouped
    by source layer/table object IDs. Each feature set contains
    Feature objects including the values for the fields requested by
    the user. For related layers, if you request geometry
    information, the geometry of each feature is also returned in
    the feature set. For related tables, the feature set does not
    include geometries.
    Inputs:
       objectIds - the object IDs of the table/layer to be queried
       relationshipId - The ID of the relationship to be queried.
       outFields - the list of fields from the related table/layer
                   to be included in the returned feature set. This
                   list is a comma delimited list of field names. If
                   you specify the shape field in the list of return
                   fields, it is ignored. To request geometry, set
                   returnGeometry to true.
                   You can also specify the wildcard "*" as the
                   value of this parameter. In this case, the
                   results will include all the field values.
       definitionExpression - The definition expression to be
                              applied to the related table/layer.
                              From the list of objectIds, only those
                              records that conform to this
                              expression are queried for related
                              records.
       returnGeometry - If true, the feature set includes the
                        geometry associated with each feature. The
                        default is true.
       maxAllowableOffset - This option can be used to specify the
                            maxAllowableOffset to be used for
                            generalizing geometries returned by the
                            query operation. The maxAllowableOffset
                            is in the units of the outSR. If outSR
                            is not specified, then
                            maxAllowableOffset is assumed to be in
                            the unit of the spatial reference of the
                            map.
       geometryPrecision - This option can be used to specify the
                           number of decimal places in the response
                           geometries.
       outWKID - The spatial reference of the returned geometry.
       gdbVersion - The geodatabase version to query. This parameter
                    applies only if the isDataVersioned property of
                    the layer queried is true.
       returnZ - If true, Z values are included in the results if
                 the features have Z values. Otherwise, Z values are
                 not returned. The default is false.
       returnM - If true, M values are included in the results if
                 the features have M values. Otherwise, M values are
                 not returned. The default is false.
    """
    # required request parameters
    params = {
        "f" : "json",
        "objectIds" : objectIds,
        "relationshipId" : relationshipId,
        "outFields" : outFields,
        "returnGeometry" : returnGeometry,
        "returnM" : returnM,
        "returnZ" : returnZ
    }
    # optional parameters are only sent when supplied
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if definitionExpression is not None:
        params['definitionExpression'] = definitionExpression
    if outWKID is not None:
        # expand the bare WKID into a full spatial-reference dictionary
        params['outSR'] = SpatialReference(outWKID).asDictionary
    if maxAllowableOffset is not None:
        params['maxAllowableOffset'] = maxAllowableOffset
    if geometryPrecision is not None:
        params['geometryPrecision'] = geometryPrecision
    quURL = self._url + "/queryRelatedRecords"
    res = self._do_get(url=quURL, param_dict=params,
                       securityHandler=self._securityHandler,
                       proxy_port=self._proxy_port,
                       proxy_url=self._proxy_url)
    return res
#----------------------------------------------------------------------
def getHTMLPopup(self, oid):
    """
       The htmlPopup resource provides details about the HTML pop-up
       authored by the user using ArcGIS for Desktop.
       Input:
          oid - object id of the feature whose HTML pop-up is requested
       Output:
          JSON response as dictionary, or an empty string when the
          layer's htmlPopupType is esriServerHTMLPopupTypeNone
    """
    if self.htmlPopupType != "esriServerHTMLPopupTypeNone":
        popURL = self._url + "/%s/htmlPopup" % oid
        params = {
            'f' : "json"
        }
        return self._do_get(url=popURL, param_dict=params,
                            securityHandler=self._securityHandler,
                            proxy_port=self._proxy_port,
                            proxy_url=self._proxy_url)
    return ""
#----------------------------------------------------------------------
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
#----------------------------------------------------------------------
def get_local_copy(self, out_path, includeAttachments=False):
    """ exports the whole feature service to a feature class
        Input:
           out_path - path to where the data will be placed
           includeAttachments - default False. If sync is not supported
                                then the parameter is ignored.
        Output:
           path to exported feature class or fgdb (as list)
    """
    # sync-enabled services can be dumped server-side via createReplica
    if self.hasAttachments and \
       self.parentLayer.syncEnabled:
        return self.parentLayer.createReplica(replicaName="fgdb_dump",
                                              layers="%s" % self.id,
                                              returnAsFeatureClass=True,
                                              returnAttachments=includeAttachments,
                                              out_path=out_path)[0]
    elif self.hasAttachments == False and \
         self.parentLayer.syncEnabled:
        return self.parentLayer.createReplica(replicaName="fgdb_dump",
                                              layers="%s" % self.id,
                                              returnAsFeatureClass=True,
                                              out_path=out_path)[0]
    else:
        # no sync support: page through the data with OID-range queries
        # and merge the per-chunk feature classes at the end
        result_features = []
        res = self.query(returnIDsOnly=True)
        OIDS = res['objectIds']
        OIDS.sort()
        OIDField = res['objectIdFieldName']
        count = len(OIDS)
        if count <= self.maxRecordCount:
            bins = 1
        else:
            # NOTE: integer division under Python 2; remainder adds a bin
            bins = count / self.maxRecordCount
            v = count % self.maxRecordCount
            if v > 0:
                bins += 1
        chunks = self._chunks(OIDS, bins)
        for chunk in chunks:
            chunk.sort()
            sql = "%s >= %s and %s <= %s" % (OIDField, chunk[0],
                                             OIDField, chunk[len(chunk) -1])
            # NOTE(review): uuid4().get_hex() is Python 2 only; .hex is
            # the cross-version equivalent
            temp_base = "a" + uuid.uuid4().get_hex()[:6] + "a"
            temp_fc = r"%s\%s" % (scratchGDB(), temp_base)
            temp_fc = self.query(where=sql,
                                 returnFeatureClass=True,
                                 out_fc=temp_fc)
            result_features.append(temp_fc)
        return merge_feature_class(merges=result_features,
                                   out_fc=out_path)
#----------------------------------------------------------------------
def updateFeature(self,
                  features,
                  gdbVersion=None,
                  rollbackOnFailure=True):
    """
       updates one or more existing features in a feature service layer
       Input:
          features - a single Feature, a list of Feature objects, or a
                     FeatureSet object holding the features to update
          gdbVersion - optional geodatabase version to apply the edits to
          rollbackOnFailure - when True the edits are applied only if all
                              submitted edits succeed
       Output:
          dictionary of result messages
    """
    params = {"f": "json",
              "rollbackOnFailure": rollbackOnFailure}
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    # normalize every accepted input shape to a list of dictionaries
    if isinstance(features, Feature):
        payload = [features.asDictionary]
    elif isinstance(features, list):
        payload = [f.asDictionary for f in features
                   if isinstance(f, Feature)]
    elif isinstance(features, FeatureSet):
        payload = [f.asDictionary for f in features.features]
    else:
        return {'message' : "invalid inputs"}
    params['features'] = json.dumps(payload, default=_date_handler)
    return self._do_post(url=self._url + "/updateFeatures",
                         securityHandler=self._securityHandler,
                         param_dict=params, proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteFeatures(self,
                   objectIds="",
                   where="",
                   geometryFilter=None,
                   gdbVersion=None,
                   rollbackOnFailure=True
                   ):
    """ removes 1:n features based on a sql statement
        Input:
          objectIds - The object IDs of this layer/table to be deleted
          where - A where clause for the query filter. Any legal SQL
                  where clause operating on the fields in the layer is
                  allowed. Features conforming to the specified where
                  clause will be deleted.
          geometryFilter - a filters.GeometryFilter object to limit
                           deletion by a geometry.
          gdbVersion - Geodatabase version to apply the edits. This
                      parameter applies only if the isDataVersioned
                      property of the layer is true
          rollbackOnFailure - parameter to specify if the edits should
                              be applied only if all submitted edits
                              succeed. If false, the server will apply
                              the edits that succeed even if some of
                              the submitted edits fail. If true, the
                              server will apply the edits only if all
                              edits succeed. The default value is true.
        Output:
           JSON response as dictionary
    """
    dURL = self._url + "/deleteFeatures"
    params = {
        "f": "json",
        # BUGFIX: rollbackOnFailure was accepted but never forwarded
        "rollbackOnFailure": rollbackOnFailure,
    }
    # BUGFIX: gdbVersion was accepted but never forwarded
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if geometryFilter is not None and \
       isinstance(geometryFilter, filters.GeometryFilter):
        gfilter = geometryFilter.filter
        params['geometry'] = gfilter['geometry']
        params['geometryType'] = gfilter['geometryType']
        params['inSR'] = gfilter['inSR']
        params['spatialRel'] = gfilter['spatialRel']
    if where is not None and where != "":
        params['where'] = where
    if objectIds is not None and objectIds != "":
        params['objectIds'] = objectIds
    result = self._do_post(url=dURL, param_dict=params,
                           securityHandler=self._securityHandler,
                           proxy_port=self._proxy_port,
                           proxy_url=self._proxy_url)
    return result
#----------------------------------------------------------------------
def applyEdits(self,
               addFeatures=None,
               updateFeatures=None,
               deleteFeatures=None,
               gdbVersion=None,
               rollbackOnFailure=True):
    """
    This operation adds, updates, and deletes features to the
    associated feature layer or table in a single call.
    Inputs:
       addFeatures - The array of features to be added. These
                     features should be common.Feature objects
       updateFeatures - The array of features to be updated.
                        These features should be common.Feature
                        objects
       deleteFeatures - string of OIDs to remove from service
       gdbVersion - Geodatabase version to apply the edits.
       rollbackOnFailure - Optional parameter to specify if the
                           edits should be applied only if all
                           submitted edits succeed. If false, the
                           server will apply the edits that succeed
                           even if some of the submitted edits fail.
                           If true, the server will apply the edits
                           only if all edits succeed. The default
                           value is true.
    Output:
       dictionary of messages
    """
    # BUGFIX: use None sentinels instead of shared mutable [] defaults
    if addFeatures is None:
        addFeatures = []
    if updateFeatures is None:
        updateFeatures = []
    editURL = self._url + "/applyEdits"
    params = {"f": "json",
              # BUGFIX: rollbackOnFailure was documented but never sent
              "rollbackOnFailure": rollbackOnFailure}
    # BUGFIX: gdbVersion was documented but never sent
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if len(addFeatures) > 0 and \
       isinstance(addFeatures[0], Feature):
        params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
                                    default=_date_handler)
    if len(updateFeatures) > 0 and \
       isinstance(updateFeatures[0], Feature):
        params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
                                       default=_date_handler)
    if deleteFeatures is not None and \
       isinstance(deleteFeatures, str):
        params['deletes'] = deleteFeatures
    return self._do_post(url=editURL, param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeature(self, features,
               gdbVersion=None,
               rollbackOnFailure=True):
    """ Adds one or more features to the service
        Inputs:
           features - list of common.Feature objects, a single
                      common.Feature object, or a FeatureSet object
           gdbVersion - Geodatabase version to apply the edits
           rollbackOnFailure - Optional parameter to specify if the
                               edits should be applied only if all
                               submitted edits succeed. If false, the
                               server will apply the edits that succeed
                               even if some of the submitted edits fail.
                               If true, the server will apply the edits
                               only if all edits succeed. The default
                               value is true.
        Output:
           JSON message as dictionary, or None for unsupported inputs
    """
    url = self._url + "/addFeatures"
    params = {
        "f" : "json"
    }
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if isinstance(rollbackOnFailure, bool):
        params['rollbackOnFailure'] = rollbackOnFailure
    if isinstance(features, list):
        params['features'] = json.dumps([feature.asDictionary for feature in features],
                                        default=_date_handler)
    elif isinstance(features, Feature):
        params['features'] = json.dumps([features.asDictionary],
                                        default=_date_handler)
    elif isinstance(features, FeatureSet):
        # BUGFIX: the original iterated `feature.features`, an undefined
        # name; the FeatureSet argument is `features`
        params['features'] = json.dumps([feature.asDictionary for feature in features.features],
                                        default=_date_handler)
    else:
        return None
    return self._do_post(url=url,
                         param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeatures(self, fc, attachmentTable=None,
                nameField="ATT_NAME", blobField="DATA",
                contentTypeField="CONTENT_TYPE",
                rel_object_field="REL_OBJECTID"):
    """ adds the rows of a local feature class to the feature service
        Inputs:
           fc - string - path to feature class data to add.
           attachmentTable - string - (optional) path to attachment table
           nameField - string - (optional) name of file field in attachment table
           blobField - string - (optional) name field containing blob data
           contentTypeField - string - (optional) name of field containing content type
           rel_object_field - string - (optional) name of field with OID of feature class
        Output:
           boolean, add results message as list of dictionaries
    """
    messages = {'addResults':[]}
    if attachmentTable is None:
        count = 0
        bins = 1
        uURL = self._url + "/addFeatures"
        # features are uploaded in batches of at most 250 records
        max_chunk = 250
        js = json.loads(self._unicode_convert(
            featureclass_to_json(fc)))
        js = js['features']
        if len(js) == 0:
            return {'addResults':None}
        if len(js) <= max_chunk:
            bins = 1
        else:
            # NOTE: integer division (Python 2); a remainder adds one bin
            bins = int(len(js)/max_chunk)
            if len(js) % max_chunk > 0:
                bins += 1
        chunks = self._chunks(l=js, n=bins)
        for chunk in chunks:
            params = {
                "f" : 'json',
                "features" : json.dumps(chunk,
                                        default=self._date_handler)
            }
            result = self._do_post(url=uURL, param_dict=params,
                                   securityHandler=self._securityHandler,
                                   proxy_port=self._proxy_port,
                                   proxy_url=self._proxy_url)
            # NOTE(review): `messages` is initialized above, so this
            # None branch is dead code -- kept for fidelity
            if messages is None:
                messages = result
            else:
                # accumulate per-chunk addResults; anything else is an error
                if 'addResults' in result:
                    if 'addResults' in messages:
                        messages['addResults'] = messages['addResults'] + result['addResults']
                    else:
                        messages['addResults'] = result['addResults']
                else:
                    messages['errors'] = result
            del params
            del result
        return messages
    else:
        # first add every feature without attachments in one bulk call,
        # then add attachment-bearing features one by one so their new
        # server-side OIDs can be paired with the uploads
        oid_field = get_OID_field(fc)
        OIDs = get_records_with_attachments(attachment_table=attachmentTable)
        fl = create_feature_layer(fc, "%s not in ( %s )" % (oid_field, ",".join(OIDs)))
        result = self.addFeatures(fl)
        if result is not None:
            messages.update(result)
        del fl
        for oid in OIDs:
            fl = create_feature_layer(fc, "%s = %s" % (oid_field, oid), name="layer%s" % oid)
            msgs = self.addFeatures(fl)
            for result in msgs['addResults']:
                oid_fs = result['objectId']
                sends = get_attachment_data(attachmentTable, sql="%s = %s" % (rel_object_field, oid))
                result['addAttachmentResults'] = []
                for s in sends:
                    attRes = self.addAttachment(oid_fs, s['blob'])
                    if 'addAttachmentResult' in attRes:
                        attRes['addAttachmentResult']['AttachmentName'] = s['name']
                        result['addAttachmentResults'].append(attRes['addAttachmentResult'])
                    else:
                        attRes['AttachmentName'] = s['name']
                        result['addAttachmentResults'].append(attRes)
                    del s
                del sends
                del result
            messages.update( msgs)
            del fl
            del oid
        del OIDs
        return messages
#----------------------------------------------------------------------
def calculate(self, where, calcExpression, sqlFormat="standard"):
    """
    Updates the values of one or more fields on records matching *where*
    using SQL expressions or scalar values (the feature service
    "calculate" operation; usable only when the layer's
    supportsCalculate property is true).  Neither the Shape field nor
    system fields (ObjectId, GlobalId) can be updated this way.
    Inputs:
       where - SQL where clause limiting the updated records; any legal
               SQL operating on the layer's fields is allowed
       calcExpression - a dict or a list of dicts of the form
                        {"field" : "<field name>", "value" : "<value>"}
       sqlFormat - "standard" (SQL92) or "native"; any other value
                   falls back to "standard"
    Output:
       JSON as string
    Usage:
       >>>sh = arcrest.AGOLTokenSecurityHandler("user", "pw")
       >>>fl = arcrest.agol.FeatureLayer(url="someurl",
                            securityHandler=sh, initialize=True)
       >>>print fl.calculate(where="OBJECTID < 2",
                             calcExpression={"field": "ZONE",
                                             "value" : "R1"})
       {'updatedFeatureCount': 1, 'success': True}
    """
    params = {"f": "json",
              "where": where}
    # a single dict is wrapped into a one-element list before encoding
    if isinstance(calcExpression, dict):
        calcExpression = [calcExpression]
    if isinstance(calcExpression, list):
        params["calcExpression"] = json.dumps(calcExpression,
                                              default=_date_handler)
    sql_fmt = sqlFormat.lower()
    if sql_fmt not in ('native', 'standard'):
        sql_fmt = "standard"
    params['sqlFormat'] = sql_fmt
    return self._do_post(url=self._url + "/calculate",
                         param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_port=self._proxy_port,
                         proxy_url=self._proxy_url)
########################################################################
class TableLayer(FeatureLayer):
"""Table object is exactly like FeatureLayer object"""
pass | {
"content_hash": "be51c3dc6266e7b422bb92ab84fb3938",
"timestamp": "",
"source": "github",
"line_count": 1258,
"max_line_length": 105,
"avg_line_length": 43.29570747217806,
"alnum_prop": 0.48219072448867184,
"repo_name": "jgravois/ArcREST",
"id": "03e607e17dfbd28c61d966b7b784794930970819",
"size": "54466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcrest/agol/layer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1591951"
}
],
"symlink_target": ""
} |
"""This example creates a mobile creative asset in a given advertiser. Currently
only gif, jpg, jpeg, png and wbmp files are supported as mobile assets. To
create an advertiser, run create_advertiser.py.
Tags: creative.saveCreativeAsset
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.common import Utils
from adspygoogle import DfaClient
# Placeholder values -- replace with a real advertiser ID, asset name and
# local file path before running this example.
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
ASSET_NAME = 'INSERT_MOBILE_ASSET_NAME_HERE'
PATH_TO_FILE = 'INSERT_PATH_TO_FILE_HERE'
def main(client, advertiser_id, asset_name, path_to_file):
  """Creates and saves a mobile creative asset for the given advertiser.

  Args:
    client: DfaClient instance used to issue the API calls.
    advertiser_id: ID of the advertiser the asset belongs to.
    asset_name: name to give the uploaded mobile asset.
    path_to_file: local path of the image file to upload.
  """
  # Initialize appropriate service.
  creative_service = client.GetCreativeService(
      'https://advertisersapitest.doubleclick.net', 'v1.19')
  # Convert file into format that can be sent in SOAP messages.
  content = Utils.ReadFile(path_to_file)
  # NOTE(review): base64.encodestring is deprecated (removed in Python
  # 3.9); this example targets the Python 2 adspygoogle library.
  content = base64.encodestring(content)
  # Construct and save mobile asset.
  image_asset = {
      'name': asset_name,
      'advertiserId': advertiser_id,
      'content': content,
      'forHTMLCreatives': 'true'
  }
  result = creative_service.SaveCreativeAsset(image_asset)[0]
  # Display results.
  print ('Creative asset with file name of \'%s\' was created.'
         % result['savedFilename'])
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): presumably loads cached auth configuration from the
  # given path -- confirm against the adspygoogle DfaClient docs.
  client = DfaClient(path=os.path.join('..', '..', '..', '..'))
  main(client, ADVERTISER_ID, ASSET_NAME, PATH_TO_FILE)
| {
"content_hash": "c7cbdbd534a25efcdea22654f307e603",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 30.352941176470587,
"alnum_prop": 0.6989664082687338,
"repo_name": "lociii/googleads-python-lib",
"id": "f93696bcf35c109411309f0f92b490bedeb9ca43",
"size": "2166",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfa/v1_19/create_mobile_asset.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3481618"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
} |
"""
Backdoc is a tool for backbone-like documentation generation.
Backdoc main goal is to help to generate one page documentation from one markdown source file.
https://github.com/chibisov/backdoc
"""
import sys
import argparse
from markdown2 import Markdown
# Read the HTML page template once at import time; use a context manager so
# the file handle is closed promptly instead of leaking until GC.
with open('./template.html', 'r') as _template_file:
    template_html = _template_file.read()
# True when running under Python 3 (drives the str/unicode handling below)
PY3 = sys.version_info[0] == 3
def force_text(text):
    """Return *text* as native unicode text, decoding UTF-8 bytes if needed."""
    # BUGFIX: the original bound the text type to a local named `type`,
    # shadowing the builtin
    text_type = str if sys.version_info[0] == 3 else unicode
    if isinstance(text, text_type):
        return text
    return text.decode('utf-8')
class BackDoc(object):
    """Converts one markdown source into a single-page HTML document.

    Reads markdown from a file (``--source``) or from *stdin*, converts
    it with the supplied markdown converter and writes the filled-in
    HTML template to *stdout*.
    """

    def __init__(self, markdown_converter, template_html, stdin, stdout):
        self.markdown_converter = markdown_converter
        self.template_html = force_text(template_html)
        self.stdin = stdin
        self.stdout = stdout
        self.parser = self.get_parser()

    def run(self, argv):
        """Parse *argv*, render the HTML page and write it to stdout."""
        html = self.get_result_html(**self.get_kwargs(argv))
        if not PY3:
            # Python 2 file objects expect encoded bytes
            html = html.encode('utf-8')
        self.stdout.write(html)

    def get_kwargs(self, argv):
        """Return rendering kwargs derived from the command-line args."""
        namespace = self.parser.parse_args(argv)
        return self.prepare_kwargs_from_parsed_data(
            dict(namespace._get_kwargs()))

    def prepare_kwargs_from_parsed_data(self, parsed):
        """Build ``{'title': ..., 'markdown_src': ...}`` from parsed options."""
        kwargs = {}
        kwargs['title'] = force_text(parsed.get('title') or 'Documentation')
        if parsed.get('source'):
            markdown_src = open(parsed['source'], 'r').read()
        else:
            markdown_src = self.stdin.read()
        kwargs['markdown_src'] = force_text(markdown_src or '')
        return kwargs

    def get_result_html(self, title, markdown_src):
        """Render the template with the title, TOC and converted body."""
        response = self.get_converted_to_html_response(markdown_src)
        toc = force_text(response.toc_html) if response.toc_html else ''
        html = self.template_html.replace('<!-- title -->', title)
        html = html.replace('<!-- toc -->', toc)
        return html.replace('<!-- main_content -->', force_text(response))

    def get_converted_to_html_response(self, markdown_src):
        """Run the markdown converter; the result also carries ``toc_html``."""
        return self.markdown_converter.convert(markdown_src)

    def get_parser(self):
        """Build the command-line argument parser."""
        parser = argparse.ArgumentParser()
        parser.add_argument('-t', '--title', required=False,
                            help='Documentation title header')
        parser.add_argument('-s', '--source', required=False,
                            help='Markdown source file path')
        return parser
if __name__ == '__main__':
    # Convert markdown (from --source or stdin) into an HTML page on
    # stdout; the 'toc' extra makes markdown2 emit a table of contents.
    BackDoc(
        markdown_converter=Markdown(extras=['toc']),
        template_html=template_html,
        stdin=sys.stdin,
        stdout=sys.stdout
    ).run(argv=sys.argv[1:])
| {
"content_hash": "c4e2b274b8447a7b213c9264dd3687f3",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 113,
"avg_line_length": 30.376344086021504,
"alnum_prop": 0.5805309734513274,
"repo_name": "chibisov/backdoc",
"id": "6224717397b2064173dd8ebd7f6d6945b3e65204",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/backdoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "31141"
},
{
"name": "Python",
"bytes": "223321"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _, string_concat
from django.forms.fields import CharField
from django.core.exceptions import ValidationError
from phonenumber_field.validators import validate_international_phonenumber
from phonenumber_field.phonenumber import to_python
from phonenumber_field.widgets import PhoneNumberWidget
class PhoneNumberField(CharField):
    """Form field that normalizes its input to a PhoneNumber object."""

    widget = PhoneNumberWidget
    default_error_messages = {
        'invalid': _(u'Enter a valid phone number.'),
    }
    default_validators = [validate_international_phonenumber]

    def to_python(self, value):
        """Convert raw input into a PhoneNumber, raising on invalid numbers."""
        phone_number = to_python(value)
        # empty/falsy inputs pass through untouched; valid numbers are kept
        if not phone_number or phone_number.is_valid():
            return phone_number
        msg = string_concat(
            self.error_messages['invalid'],
            u" The provided value, {0}, is invalid.".format(phone_number.raw_input)
        )
        raise ValidationError(msg)
| {
"content_hash": "2720c980145d30d876354ba920cf7845",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 87,
"avg_line_length": 39.541666666666664,
"alnum_prop": 0.7017913593256059,
"repo_name": "DjangoAdminHackers/django-phonenumber-field",
"id": "949526cd9c278bcf17559a5b3fca821a99abc5db",
"size": "972",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "phonenumber_field/formfields.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import base64
import re
from pywps import xml_util as etree
from pywps.app.Common import Metadata
from pywps.exceptions import InvalidParameterValue
from pywps.inout.formats import Format
from pywps.inout import basic
from copy import deepcopy
from pywps.validator.mode import MODE
from pywps.inout.literaltypes import AnyValue, NoValue, ValuesReference, AllowedValue
CDATA_PATTERN = re.compile(r'<!\[CDATA\[(.*?)\]\]>')
class BoundingBoxInput(basic.BBoxInput):
    """
    :param string identifier: The name of this input.
    :param string title: Human readable title
    :param string abstract: Longer text description
    :param crss: List of supported coordinate reference
                 system (e.g. ['EPSG:4326'])
    :param list keywords: Keywords that characterize this input.
    :param int dimensions: 2 or 3
    :param str workdir: working directory, to save temporary file objects in.
    :param list metadata: List of metadata advertised by this process. They
                          should be :class:`pywps.app.Common.Metadata` objects.
    :param int min_occurs: how many times this input occurs
    :param int max_occurs: how many times this input occurs
    :param dict[str,dict[str,str]] translations: The first key is the RFC 4646
        language code, and the nested mapping contains translated strings
        accessible by a string property.
        e.g. {"fr-CA": {"title": "Mon titre", "abstract": "Une description"}}
    """

    def __init__(self, identifier, title, crss=None, abstract='', keywords=None,
                 dimensions=2, workdir=None, metadata=None, min_occurs=1,
                 max_occurs=1,
                 mode=MODE.NONE,
                 default=None, default_type=basic.SOURCE_TYPE.DATA,
                 translations=None):
        # BUGFIX: `keywords=[]` / `metadata=[]` were shared mutable
        # defaults; normalize None sentinels to fresh lists instead.
        keywords = [] if keywords is None else keywords
        metadata = [] if metadata is None else metadata
        basic.BBoxInput.__init__(self, identifier, title=title, crss=crss,
                                 abstract=abstract, keywords=keywords,
                                 dimensions=dimensions, workdir=workdir,
                                 metadata=metadata,
                                 min_occurs=min_occurs, max_occurs=max_occurs,
                                 mode=mode, default=default,
                                 default_type=default_type,
                                 translations=translations)
        # when True the response renders this input as a reference (href)
        self.as_reference = False

    @property
    def json(self):
        """Get JSON representation of the input
        """
        return {
            'identifier': self.identifier,
            'title': self.title,
            'abstract': self.abstract,
            'keywords': self.keywords,
            'type': 'bbox',
            'crs': self.crs,
            'crss': self.crss,
            'metadata': [m.json for m in self.metadata],
            'bbox': self.data,
            'll': self.ll,
            'ur': self.ur,
            'dimensions': self.dimensions,
            'workdir': self.workdir,
            'mode': self.valid_mode,
            'min_occurs': self.min_occurs,
            'max_occurs': self.max_occurs,
            'translations': self.translations,
        }

    @classmethod
    def from_json(cls, json_input):
        """Re-create a BoundingBoxInput from its ``json`` representation."""
        instance = cls(
            identifier=json_input['identifier'],
            title=json_input.get('title'),
            abstract=json_input.get('abstract'),
            crss=json_input.get('crss'),
            keywords=json_input.get('keywords'),
            metadata=[Metadata.from_json(data) for data in json_input.get('metadata', [])],
            dimensions=json_input.get('dimensions'),
            workdir=json_input.get('workdir'),
            mode=json_input.get('mode'),
            min_occurs=json_input.get('min_occurs'),
            max_occurs=json_input.get('max_occurs'),
            translations=json_input.get('translations'),
        )
        instance.data = json_input['bbox']
        return instance

    def clone(self):
        """Create copy of yourself
        """
        return deepcopy(self)
class ComplexInput(basic.ComplexInput):
    """
    Complex data input

    :param str identifier: The name of this input.
    :param str title: Title of the input
    :param pywps.inout.formats.Format supported_formats: List of supported
                                                         formats
    :param pywps.inout.formats.Format data_format: default data format
    :param str abstract: Input abstract
    :param list keywords: Keywords that characterize this input.
    :param str workdir: working directory, to save temporary file objects in.
    :param list metadata: TODO
    :param int min_occurs: minimum occurrence
    :param int max_occurs: maximum occurrence
    :param pywps.validator.mode.MODE mode: validation mode (none to strict)
    :param dict[str,dict[str,str]] translations: The first key is the RFC 4646 language code,
        and the nested mapping contains translated strings accessible by a string property.
        e.g. {"fr-CA": {"title": "Mon titre", "abstract": "Une description"}}
    """

    def __init__(self, identifier, title, supported_formats,
                 data_format=None, abstract='', keywords=None, workdir=None, metadata=None, min_occurs=1,
                 max_occurs=1, mode=MODE.NONE,
                 default=None, default_type=basic.SOURCE_TYPE.DATA, translations=None):
        """constructor"""
        # Fix the shared-mutable-default-argument pitfall: the previous
        # ``keywords=[]``/``metadata=[]`` defaults were evaluated once and
        # shared by every instance constructed without those arguments.
        keywords = [] if keywords is None else keywords
        metadata = [] if metadata is None else metadata
        basic.ComplexInput.__init__(self, identifier, title=title,
                                    supported_formats=supported_formats,
                                    data_format=data_format, abstract=abstract,
                                    keywords=keywords, workdir=workdir, metadata=metadata,
                                    min_occurs=min_occurs,
                                    max_occurs=max_occurs, mode=mode,
                                    default=default, default_type=default_type, translations=translations)
        # Whether this input should be advertised as a reference (href)
        # rather than inline data; toggled by callers after construction.
        self.as_reference = False
        self.method = ''

    @property
    def json(self):
        """Get JSON representation of the input
        """
        data = {
            'identifier': self.identifier,
            'title': self.title,
            'abstract': self.abstract,
            'keywords': self.keywords,
            'metadata': [m.json for m in self.metadata],
            'type': 'complex',
            'data_format': self.data_format.json,
            'asreference': self.as_reference,
            'supported_formats': [frmt.json for frmt in self.supported_formats],
            'workdir': self.workdir,
            'mode': self.valid_mode,
            'min_occurs': self.min_occurs,
            'max_occurs': self.max_occurs,
            'translations': self.translations,
        }
        # Serialize the payload according to where the data currently lives
        # (``self.prop`` is maintained by basic.ComplexInput).
        if self.prop == 'file':
            data['file'] = self.file
        elif self.prop == 'url':
            data["href"] = self.url
        elif self.prop == 'data':
            data = self._json_data(data)
        elif self.prop == 'stream':
            # we store the stream in the data property
            data = self._json_data(data)
        if self.data_format:
            if self.data_format.mime_type:
                data['mimetype'] = self.data_format.mime_type
            if self.data_format.encoding:
                data['encoding'] = self.data_format.encoding
            if self.data_format.schema:
                data['schema'] = self.data_format.schema
        return data

    @classmethod
    def from_json(cls, json_input):
        """Reconstruct a ComplexInput from its JSON representation.

        :param dict json_input: mapping produced by the ``json`` property.
        """
        data_format = json_input.get('data_format')
        if data_format is not None:
            data_format = Format(
                schema=data_format.get('schema'),
                extension=data_format.get('extension'),
                mime_type=data_format.get('mime_type', ""),
                encoding=data_format.get('encoding')
            )
        instance = cls(
            identifier=json_input['identifier'],
            title=json_input.get('title'),
            abstract=json_input.get('abstract'),
            keywords=json_input.get('keywords', []),
            workdir=json_input.get('workdir'),
            metadata=[Metadata.from_json(data) for data in json_input.get('metadata', [])],
            data_format=data_format,
            supported_formats=[
                Format(
                    schema=infrmt.get('schema'),
                    extension=infrmt.get('extension'),
                    mime_type=infrmt.get('mime_type'),
                    encoding=infrmt.get('encoding')
                ) for infrmt in json_input.get('supported_formats', [])
            ],
            mode=json_input.get('mode', MODE.NONE),
            translations=json_input.get('translations'),
        )
        instance.as_reference = json_input.get('asreference', False)
        if json_input.get('file'):
            instance.file = json_input['file']
        elif json_input.get('href'):
            instance.url = json_input['href']
        elif json_input.get('data'):
            data = json_input['data']
            # Bug fix: previously ``data_format.encoding`` was dereferenced
            # unconditionally, raising AttributeError when the JSON carried
            # inline data but no 'data_format' entry.
            if data_format is not None and data_format.encoding == 'base64':
                instance.data = base64.b64decode(data)
            else:
                # remove cdata tag if it exists (issue #553)
                if isinstance(data, str):
                    match = CDATA_PATTERN.match(data)
                    if match:
                        data = match.group(1)
                instance.data = data
        return instance

    def _json_data(self, data):
        """Return Data node
        """
        if self.data:
            if self.data_format.mime_type in ["application/xml", "application/gml+xml", "text/xml"]:
                # Note that in a client-server round trip, the original and returned file will not be identical.
                data_doc = etree.parse(self.file)
                data["data"] = etree.tostring(data_doc, pretty_print=True).decode('utf-8')
            else:
                if self.data_format.encoding == 'base64':
                    # self.base64 (inherited) is presumably the payload already
                    # base64-encoded as bytes — TODO confirm against basic.ComplexInput.
                    data["data"] = self.base64.decode('utf-8')
                else:
                    # Otherwise we assume all other formats are unsafe and need to be enclosed in a CDATA tag.
                    if isinstance(self.data, bytes):
                        out = self.data.decode(self.data_format.encoding or 'utf-8')
                    else:
                        out = self.data
                    data["data"] = '<![CDATA[{}]]>'.format(out)
        return data

    def clone(self):
        """Return a deep copy of this input instance."""
        return deepcopy(self)
class LiteralInput(basic.LiteralInput):
    """
    :param str identifier: The name of this input.
    :param str title: Title of the input
    :param pywps.inout.literaltypes.LITERAL_DATA_TYPES data_type: data type
    :param str workdir: working directory, to save temporary file objects in.
    :param str abstract: Input abstract
    :param list keywords: Keywords that characterize this input.
    :param list metadata: TODO
    :param str uoms: units
    :param int min_occurs: minimum occurence
    :param int max_occurs: maximum occurence
    :param pywps.validator.mode.MODE mode: validation mode (none to strict)
    :param pywps.inout.literaltypes.AnyValue allowed_values: or :py:class:`pywps.inout.literaltypes.AllowedValue` object
    :param metadata: List of metadata advertised by this process. They
                     should be :class:`pywps.app.Common.Metadata` objects.
    :param dict[str,dict[str,str]] translations: The first key is the RFC 4646 language code,
        and the nested mapping contains translated strings accessible by a string property.
        e.g. {"fr-CA": {"title": "Mon titre", "abstract": "Une description"}}
    """

    def __init__(self, identifier, title=None, data_type=None, workdir=None, abstract='', keywords=None,
                 metadata=None, uoms=None,
                 min_occurs=1, max_occurs=1,
                 mode=MODE.SIMPLE, allowed_values=None,
                 default=None, default_type=basic.SOURCE_TYPE.DATA, translations=None):
        """Constructor
        """
        # Fix the shared-mutable-default-argument pitfall: the previous
        # ``keywords=[]``/``metadata=[]`` defaults were evaluated once and
        # shared by every instance constructed without those arguments.
        keywords = [] if keywords is None else keywords
        metadata = [] if metadata is None else metadata
        # 'string' is the documented fallback data type.
        data_type = data_type or 'string'
        basic.LiteralInput.__init__(self, identifier, title=title,
                                    data_type=data_type, workdir=workdir, abstract=abstract,
                                    keywords=keywords, metadata=metadata,
                                    uoms=uoms, min_occurs=min_occurs,
                                    max_occurs=max_occurs, mode=mode,
                                    allowed_values=allowed_values,
                                    default=default, default_type=default_type,
                                    translations=translations)
        self.as_reference = False

    @property
    def json(self):
        """Get JSON representation of the input
        """
        data = {
            'identifier': self.identifier,
            'title': self.title,
            'abstract': self.abstract,
            'keywords': self.keywords,
            'metadata': [m.json for m in self.metadata],
            'type': 'literal',
            'data_type': self.data_type,
            'workdir': self.workdir,
            'allowed_values': [value.json for value in self.allowed_values],
            'any_value': self.any_value,
            'mode': self.valid_mode,
            'min_occurs': self.min_occurs,
            'max_occurs': self.max_occurs,
            'translations': self.translations,
            # other values not set in the constructor
        }
        if self.values_reference:
            data['values_reference'] = self.values_reference.json
        if self.uoms:
            data["uoms"] = [uom.json for uom in self.uoms]
        if self.uom:
            data["uom"] = self.uom.json
        if self.data is not None:
            data['data'] = str(self.data)
        return data

    @classmethod
    def from_json(cls, json_input):
        """Reconstruct a LiteralInput from its JSON representation.

        :param dict json_input: mapping produced by the ``json`` property.
        """
        allowed_values = []
        for allowed_value in json_input.get('allowed_values', []):
            if allowed_value['type'] == 'anyvalue':
                allowed_values.append(AnyValue())
            elif allowed_value['type'] == 'novalue':
                allowed_values.append(NoValue())
            elif allowed_value['type'] == 'valuesreference':
                allowed_values.append(ValuesReference.from_json(allowed_value))
            elif allowed_value['type'] == 'allowedvalue':
                allowed_values.append(AllowedValue.from_json(allowed_value))
        # Work on a copy so the caller's dict is left untouched, then strip
        # keys that are not constructor arguments before calling cls(**...).
        json_input_copy = deepcopy(json_input)
        json_input_copy['allowed_values'] = allowed_values
        json_input_copy['uoms'] = [
            basic.UOM(uom['uom'], uom['reference'])
            for uom in json_input.get('uoms', [])
        ]
        data = json_input_copy.pop('data', None)
        uom = json_input_copy.pop('uom', None)
        metadata = json_input_copy.pop('metadata', [])
        json_input_copy.pop('type', None)
        json_input_copy.pop('any_value', None)
        json_input_copy.pop('values_reference', None)
        instance = cls(**json_input_copy)
        instance.metadata = [Metadata.from_json(d) for d in metadata]
        instance.data = data
        if uom:
            instance.uom = basic.UOM(uom['uom'], uom['reference'])
        return instance

    def clone(self):
        """Return a deep copy of this input instance."""
        return deepcopy(self)
def input_from_json(json_data):
    """Instantiate the matching input class from a JSON representation.

    :param dict json_data: serialized input; its 'type' key selects the class
        (missing 'type' defaults to 'literal').
    :raises InvalidParameterValue: when 'type' names no known input kind.
    """
    data_type = json_data.get('type', 'literal')
    if data_type in ('complex', 'reference'):
        return ComplexInput.from_json(json_data)
    if data_type == 'literal':
        return LiteralInput.from_json(json_data)
    if data_type == 'bbox':
        return BoundingBoxInput.from_json(json_data)
    raise InvalidParameterValue("Input type not recognized: {}".format(data_type))
| {
"content_hash": "1e988e6f6912a50dbe27eb00acb0eaec",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 120,
"avg_line_length": 40.82945736434109,
"alnum_prop": 0.5656604012404278,
"repo_name": "geopython/pywps",
"id": "eb5b1535e0d5eaeae2dee1ab95b25ecf2ac017ba",
"size": "16069",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pywps/inout/inputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "454259"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils
LOG = logging.getLogger(__name__)
class SmartQos(object):
    """Manage Huawei SmartQoS policies for LUNs via the array REST client."""

    def __init__(self, client):
        # REST client used to talk to the storage array.
        self.client = client

    def create_qos(self, qos, lun_id):
        """Attach QoS settings to *lun_id*, creating a policy if needed.

        On newer arrays (>= constants.ARRAY_VERSION) an existing compatible
        QoS policy is reused when available; otherwise a new policy is
        created and activated.  If the backend raises while setting up, any
        policy created here is deleted before the exception is re-raised.
        """
        policy_id = None
        try:
            # Check QoS priority.
            if huawei_utils.check_qos_high_priority(qos):
                self.client.change_lun_priority(lun_id)
            # Create QoS policy and activate it.
            version = self.client.find_array_version()
            if version >= constants.ARRAY_VERSION:
                (qos_id, lun_list) = self.client.find_available_qos(qos)
                if qos_id:
                    # Reuse an existing policy that already matches the specs.
                    self.client.add_lun_to_qos(qos_id, lun_id, lun_list)
                else:
                    policy_id = self.client.create_qos_policy(qos, lun_id)
                    self.client.activate_deactivate_qos(policy_id, True)
            else:
                policy_id = self.client.create_qos_policy(qos, lun_id)
                self.client.activate_deactivate_qos(policy_id, True)
        except exception.VolumeBackendAPIException:
            # Roll back the freshly created policy, then re-raise the
            # original exception (save_and_reraise_exception preserves it).
            with excutils.save_and_reraise_exception():
                if policy_id is not None:
                    self.client.delete_qos_policy(policy_id)

    def delete_qos(self, qos_id):
        """Deactivate (if active) and delete the QoS policy *qos_id*."""
        qos_info = self.client.get_qos_info(qos_id)
        qos_status = qos_info['RUNNINGSTATUS']
        # 2: Active status.
        if qos_status == constants.STATUS_QOS_ACTIVE:
            self.client.activate_deactivate_qos(qos_id, False)
        self.client.delete_qos_policy(qos_id)
class SmartPartition(object):
    """Bind LUNs to a named SmartPartition on the array."""

    def __init__(self, client):
        self.client = client

    def add(self, opts, lun_id):
        """Add *lun_id* to the partition named by ``opts['partitionname']``.

        No-op unless ``opts['smartpartition']`` is the string ``'true'``.
        Raises InvalidInput when the partition name is missing or the array
        reports no partition with that name.
        """
        if opts['smartpartition'] != 'true':
            return
        partition_name = opts['partitionname']
        if not partition_name:
            raise exception.InvalidInput(
                reason=_('Partition name is None, please set '
                         'smartpartition:partitionname in key.'))
        partition_id = self.client.get_partition_id_by_name(partition_name)
        if not partition_id:
            raise exception.InvalidInput(
                reason=(_('Can not find partition id by name %(name)s.')
                        % {'name': partition_name}))
        self.client.add_lun_to_partition(lun_id, partition_id)
class SmartCache(object):
    """Bind LUNs to a named SmartCache partition on the array."""

    def __init__(self, client):
        self.client = client

    def add(self, opts, lun_id):
        """Add *lun_id* to the cache named by ``opts['cachename']``.

        No-op unless ``opts['smartcache']`` is the string ``'true'``.
        Raises InvalidInput when the cache name is missing or the array
        reports no cache with that name.
        """
        if opts['smartcache'] != 'true':
            return
        cache_name = opts['cachename']
        if not cache_name:
            raise exception.InvalidInput(
                reason=_('Cache name is None, please set '
                         'smartcache:cachename in key.'))
        cache_id = self.client.get_cache_id_by_name(cache_name)
        if not cache_id:
            raise exception.InvalidInput(
                reason=(_('Can not find cache id by cache name %(name)s.')
                        % {'name': cache_name}))
        self.client.add_lun_to_cache(lun_id, cache_id)
class SmartX(object):
    """Validate and normalize the Smart* feature options extracted from
    volume-type extra specs."""

    def get_smartx_specs_opts(self, opts):
        """Run every Smart* normalizer over *opts* and return the result."""
        # Check that smarttier is 0/1/2/3
        for normalize in (self.get_smarttier_opts,
                          self.get_smartthin_opts,
                          self.get_smartcache_opts,
                          self.get_smartpartition_opts):
            opts = normalize(opts)
        return opts

    def get_smarttier_opts(self, opts):
        """Default the tiering policy; reject values outside 0-3."""
        if opts['smarttier'] != 'true':
            opts['policy'] = '0'
            return opts
        if not opts['policy']:
            opts['policy'] = '1'
        elif opts['policy'] not in ('0', '1', '2', '3'):
            raise exception.InvalidInput(
                reason=(_('Illegal value specified for smarttier: '
                          'set to either 0, 1, 2, or 3.')))
        return opts

    def get_smartthin_opts(self, opts):
        """Translate thin/thick flags into a LUNType code (1=thin, 0=thick)."""
        thin = opts['thin_provisioning_support'] == 'true'
        thick = opts['thick_provisioning_support'] == 'true'
        if thin and thick:
            raise exception.InvalidInput(
                reason=(_('Illegal value specified for thin: '
                          'Can not set thin and thick at the same time.')))
        if thin:
            opts['LUNType'] = 1
        elif thick:
            opts['LUNType'] = 0
        return opts

    def get_smartcache_opts(self, opts):
        """Require a cache name when smartcache is enabled; clear it otherwise."""
        if opts['smartcache'] != 'true':
            opts['cachename'] = None
        elif not opts['cachename']:
            raise exception.InvalidInput(
                reason=_('Cache name is None, please set '
                         'smartcache:cachename in key.'))
        return opts

    def get_smartpartition_opts(self, opts):
        """Require a partition name when smartpartition is enabled; clear it otherwise."""
        if opts['smartpartition'] != 'true':
            opts['partitionname'] = None
        elif not opts['partitionname']:
            raise exception.InvalidInput(
                reason=_('Partition name is None, please set '
                         'smartpartition:partitionname in key.'))
        return opts
| {
"content_hash": "0c53128316922f19a67fa5f0e23d4cb0",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 36.23287671232877,
"alnum_prop": 0.5495274102079395,
"repo_name": "potsmaster/cinder",
"id": "fd408d7b5441dee529cb1f0e970e91eee6ffea15",
"size": "5939",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/huawei/smartx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12496416"
},
{
"name": "Shell",
"bytes": "8172"
}
],
"symlink_target": ""
} |
from . import static_pbkdf2
from . import per_team
from . import nonce
from . import regex
# Registry mapping a validator key (as stored on a challenge) to the class
# implementing it.  Keys ending in ``_ci`` are case-insensitive variants.
_Validators = {
    'static_pbkdf2': static_pbkdf2.StaticPBKDF2Validator,
    'static_pbkdf2_ci': static_pbkdf2.CaseStaticPBKDF2Validator,
    'per_team': per_team.PerTeamValidator,
    'nonce_166432': nonce.Nonce_16_64_Base32_Validator,
    'nonce_245632': nonce.Nonce_24_56_Base32_Validator,
    'nonce_328832': nonce.Nonce_32_88_Base32_Validator,
    'regex': regex.RegexValidator,
    'regex_ci': regex.RegexCaseValidator,
}
def GetDefaultValidator():
    """Return the registry key of the validator used when none is configured."""
    return 'static_pbkdf2'
def GetValidatorForChallenge(challenge):
    """Instantiate the validator class registered for *challenge*.

    Raises KeyError if ``challenge.validator`` is not a registered key.
    """
    return _Validators[challenge.validator](challenge)
def ValidatorNames():
    """Map each registered validator key to its display name.

    Falls back to the key itself when the class defines no ``name``.
    """
    names = {}
    for key, validator_cls in _Validators.items():
        names[key] = getattr(validator_cls, 'name', key)
    return names
def ValidatorMeta():
    """Describe every registered validator: display name and capability flags."""
    return {
        key: {
            'name': validator_cls.name,
            'per_team': validator_cls.per_team,
            'flag_gen': validator_cls.flag_gen,
        }
        for key, validator_cls in _Validators.items()
    }
def IsValidator(name):
    """Return True when *name* is a registered validator key."""
    return name in _Validators
# Bug fix: __all__ must contain *strings* naming the public attributes.
# Listing the function objects themselves makes ``from ... import *`` fail
# with "TypeError: attribute name must be string".
__all__ = ['GetValidatorForChallenge', 'ValidatorNames']
| {
"content_hash": "317bc68866b520ed90686c8439bd7b3e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 69,
"avg_line_length": 25.52173913043478,
"alnum_prop": 0.6303236797274276,
"repo_name": "google/ctfscoreboard",
"id": "816411ddcfe2c0b7515e4d6a0c5fda5115cd7a76",
"size": "1771",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scoreboard/validators/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "753"
},
{
"name": "HTML",
"bytes": "46017"
},
{
"name": "JavaScript",
"bytes": "128035"
},
{
"name": "Makefile",
"bytes": "867"
},
{
"name": "Python",
"bytes": "216795"
},
{
"name": "SCSS",
"bytes": "13249"
},
{
"name": "Shell",
"bytes": "584"
}
],
"symlink_target": ""
} |
"""
Word Embedding Module
@author: Ruchika Chhabra
"""
#!/usr/bin/python
import csv
import gensim
import numpy
import sys
import re
import operator
class WordEmbedding():
'''
This class has the following functionality:
1. Reads the input text file, containing sentences to be clustered.
2. Loads the word2vec pre-trained model.
3. Finds vector corresponding to each input sentence.
4. Creates an output CSV file containing input sentence and
corresponding vector representation.
Attributes:
-----------
1. input_file_path: Input text file containing sentences, which
are to be represented in vector format.
2. word2vec_model : Path of word2vec pre-trained model.
3. word_vector_dim: Dimension of vector to be generated for input
sentence.
'''
def __init__(self, input_file_path, word2vec_model, word_vector_dim):
'''
This method is called to create an instance of this class.
Parameters
----------
1. input_file_path: Path of Input text file containing sentences,
which are to be represented in vector format.
2. word2vec_model : Path of word2vec pre-trained model.
3. word_vector_dim: Dimension of vector to be generated for input
sentence.
'''
self.input_file_path = input_file_path
self.word2vec_model = word2vec_model
self.word_vector_dim = word_vector_dim
def sentence_to_vector(self):
'''
1. This method creates vector for each input sentence read from input
text file.
2. Sentence vector is generated by average of word vectors corresponding
to each word in the input sentence.
'''
# Write an output CSV file containing sentences and
# corresponding vector representation.
out_file = open('sentence_vectors.csv','wb+')
csv_writer = csv.writer(out_file)
# Load pre-trained word2vec model in binary format.
print '-LOADING WORD2VEC MODEL'
word_model = gensim.models.KeyedVectors.load_word2vec_format(self.word2vec_model, binary= True)
print '-LOADING COMPLETED'
# Read Input Text File line by line
print '-CONVERTING INPUT SENTENCES => VECTORS'
for sentence in open(self.input_file_path):
# sum of vectors corresponding to each word of sentence.
sum_of_word_vectors = numpy.zeros(int(self.word_vector_dim))
no_of_words = 0
sentence = re.sub('[^a-zA-Z0-9]', ' ', sentence)
for word in sentence.split():
no_of_words += 1
try:
# Find Vector corresponding to each word of sentence.
sum_of_word_vectors = sum_of_word_vectors + word_model[word]
except:
# If vector corresponding to a word does not exist,
# then skip that word.
empty_vector = numpy.zeros(int(self.word_vector_dim))
sum_of_word_vectors = sum_of_word_vectors + empty_vector
no_of_words -=1
avg_of_word_vec = (sum_of_word_vectors/no_of_words)
# Write sentence and corresponding vector in output CSV file.
csv_row = [sentence]
csv_row.extend(avg_of_word_vec)
csv_writer.writerow(csv_row)
out_file.close()
return 'sentence_vectors.csv'
| {
"content_hash": "6906b282c66a9340a1a20732fdba81af",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 97,
"avg_line_length": 33.16304347826087,
"alnum_prop": 0.6971484759095379,
"repo_name": "Ruchi2507/Text-Clustering",
"id": "e97cbe0419b05fbc5c34744e0736496c016fd464",
"size": "3051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "word_embedding/word_embedding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17342"
}
],
"symlink_target": ""
} |
from django.urls import reverse
def test_article_list(admin_client, article):
    """The article list view renders and exposes the expected field values."""
    response = admin_client.get(reverse('articles:list'))
    assert response.status_code == 200

    fields = response.context['list_items'][0]['fields']
    item_dict = {}
    for entry in fields:
        try:
            item_dict[entry['field']] = entry['value']
        except (KeyError, TypeError):
            # Entries that are not field/value mappings are skipped.
            continue

    assert item_dict['tags'][0] == 'Tag 0'
    assert item_dict['category'] == 'Category 0'
    assert item_dict['title'] == 'Article 0'
    assert item_dict['description'] == ''
| {
"content_hash": "7528e21ff54d4e1ab82b1b85f2394032",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 30.772727272727273,
"alnum_prop": 0.5923190546528804,
"repo_name": "sanoma/django-arctic",
"id": "e70eac949230208944d2fd87117edcc3459a8d30",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_articles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82255"
},
{
"name": "Dockerfile",
"bytes": "364"
},
{
"name": "HTML",
"bytes": "73412"
},
{
"name": "JavaScript",
"bytes": "33592"
},
{
"name": "Python",
"bytes": "224568"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import asyncore
from email.mime.text import MIMEText
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from smtplib import SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase
from django.test import override_settings
from django.utils.encoding import force_text, force_bytes
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (message_from_string as message_from_bytes,
message_from_file as message_from_binary_file)
class HeadersCheckMixin(object):

    def assertMessageHasHeaders(self, message, headers):
        """
        Check that :param message: has all :param headers: headers.

        :param message: can be an instance of an email.Message subclass or a
        string with the contents of an email message.
        :param headers: should be a set of (header-name, header-value) tuples.
        """
        if isinstance(message, binary_type):
            message = message_from_bytes(message)
        missing = headers - set(message.items())
        self.assertTrue(not missing, msg='Message is missing '
            'the following headers: %s' % (missing,))
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
    def test_ascii(self):
        """A plain ASCII message is built with the expected headers and payload."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com')
    def test_multiple_recipients(self):
        """Multiple 'to' addresses are joined with ', ' in the To header."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com, other@example.com')
    def test_cc(self):
        """Regression test for #7722: cc addresses appear in the Cc header
        and in recipients(), alone or combined with to/bcc."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
        # Test multiple CC with multiple To
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com'])
        # Testing with Bcc
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        # bcc recipients are included in recipients() but never in a header.
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
    def test_recipients_as_tuple(self):
        """to/cc/bcc may be passed as tuples as well as lists."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'), cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',))
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
    def test_header_injection(self):
        """A newline in the subject raises BadHeaderError (header injection),
        for both plain and lazily translated strings."""
        email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
        self.assertRaises(BadHeaderError, email.message)
        email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com'])
        self.assertRaises(BadHeaderError, email.message)
    def test_space_continuation(self):
        """
        Test for space continuation character in long (ascii) subject headers (#7747)
        """
        email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', 'from@example.com', ['to@example.com'])
        message = email.message()
        # Note that in Python 3, maximum line length has increased from 76 to 78
        self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
    def test_message_header_overrides(self):
        """
        Specifying dates or message-ids in the extra headers overrides the
        default values (#9233)
        """
        headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
        # The custom (lowercase) 'date' header is emitted verbatim in place
        # of an auto-generated Date header.
        self.assertMessageHasHeaders(email.message(), {
            ('Content-Transfer-Encoding', '7bit'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('From', 'from@example.com'),
            ('MIME-Version', '1.0'),
            ('Message-ID', 'foo'),
            ('Subject', 'subject'),
            ('To', 'to@example.com'),
            ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
        })
    def test_from_header(self):
        """
        Make sure we can manually set the From header (#9214)
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        message = email.message()
        # The explicit From header wins over the from_email argument.
        self.assertEqual(message['From'], 'from@example.com')
    def test_to_header(self):
        """
        Make sure we can manually set the To header (#17444)
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'],
                             headers={'To': 'mailing-list@example.com'})
        message = email.message()
        # The explicit To header replaces the addresses in the header only;
        # the actual envelope recipients (email.to) are unchanged.
        self.assertEqual(message['To'], 'mailing-list@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        # If we don't set the To header manually, it should default to the `to` argument to the constructor
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        message = email.message()
        self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
    def test_multiple_message_call(self):
        """
        Regression for #13259 - Make sure that headers are not changed when
        calling EmailMessage.message()
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
        # A second call must produce the same From header (no mutation).
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
    def test_unicode_address_header(self):
        """
        Regression for #11144 - When a to/from/cc header contains unicode,
        make sure the email addresses are parsed correctly (especially with
        regards to commas)
        """
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Firstname Sürname" <to@example.com>', 'other@example.com'])
        # The display name is RFC 2047 encoded; the address part stays ASCII.
        self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com')
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Sürname, Firstname" <to@example.com>', 'other@example.com'])
        self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com')
    def test_unicode_headers(self):
        """Non-ASCII subject and extra headers are RFC 2047 encoded."""
        email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
                             headers={"Sender": '"Firstname Sürname" <sender@example.com>',
                                      "Comments": 'My Sürname is non-ASCII'})
        message = email.message()
        self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
        self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
        self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
    def test_safe_mime_multipart(self):
        """
        Make sure headers can be set with a different encoding than utf-8 in
        SafeMIMEMultipart as well
        """
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', '"Sürname, Firstname" <to@example.com>'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.encoding = 'iso-8859-1'
        # '=FC' is the iso-8859-1 RFC 2047 encoding of 'ü'.
        self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
        self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
    def test_encoding(self):
        """
        Regression for #12791 - Encode body correctly with other encodings
        than utf-8
        """
        email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
        email.encoding = 'iso-8859-1'
        message = email.message()
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'other@example.com')})
        # '=FC' is the quoted-printable representation of 'ü' in iso-8859-1.
        self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
        # Make sure MIME attachments also works correctly with other encodings than utf-8
        text_content = 'Firstname Sürname is a great guy.'
        html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
        msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
        msg.encoding = 'iso-8859-1'
        msg.attach_alternative(html_content, "text/html")
        payload0 = msg.message().get_payload(0)
        self.assertMessageHasHeaders(payload0, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
        payload1 = msg.message().get_payload(1)
        self.assertMessageHasHeaders(payload1, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/html; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
    def test_attachments(self):
        """Regression test for #9367: a message with an HTML alternative and
        a PDF attachment serializes as multipart/mixed containing a
        multipart/alternative part followed by the application/pdf part."""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        # Round-trip through the stdlib parser to inspect the structure.
        message = message_from_bytes(msg_bytes)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'multipart/mixed')
        self.assertEqual(message.get_default_type(), 'text/plain')
        payload = message.get_payload()
        self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
        self.assertEqual(payload[1].get_content_type(), 'application/pdf')
    def test_non_ascii_attachment_filename(self):
        """Regression test for #14964: a non-ASCII attachment filename
        survives a serialize/parse round trip intact."""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        content = 'This is the message.'
        msg = EmailMessage(subject, content, from_email, [to], headers=headers)
        # Unicode in file name
        msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        message = message_from_bytes(msg_bytes)
        payload = message.get_payload()
        self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
    def test_dummy_backend(self):
        """
        Make sure that dummy backends returns correct number of sent messages
        """
        connection = dummy.EmailBackend()
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        # The dummy backend sends nothing but still reports the count.
        self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
    def test_backend_arg(self):
        """Test backend argument of mail.get_connection()"""
        # A dotted path selects the corresponding backend class.
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
        # Extra keyword arguments (file_path) are forwarded to the backend.
        tmp_dir = tempfile.mkdtemp()
        try:
            self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
        finally:
            shutil.rmtree(tmp_dir)
        # With no argument, the EMAIL_BACKEND setting (locmem during tests) wins.
        self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
    @override_settings(
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        ADMINS=[('nobody', 'nobody@example.com')],
        MANAGERS=[('nobody', 'nobody@example.com')])
    def test_connection_arg(self):
        """Test connection argument to send_mail(), et. al."""
        mail.outbox = []
        # Send using non-default connection
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        # The default locmem outbox must stay empty -- mail went to the custom backend.
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mass_mail([
            ('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
            ('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
        ], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 2)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
        self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
        # mail_admins/mail_managers prepend the "[Django] " subject prefix.
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_admins('Admin message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_managers('Manager message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse(b'>From the future' in email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage('Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertFalse(b'Content-Transfer-Encoding: base64' in msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted printable, should detect it can represent content with 7 bit data
msg = EmailMessage('Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 7bit' in s)
# Shouldn't use quoted printable, should detect it can represent content with 8 bit data
msg = EmailMessage('Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
s = msg.message().as_bytes()
self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage('Child Subject', 'Some body of child message', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertTrue(str('Child Subject') in parent_s)
class PythonGlobalState(SimpleTestCase):
    """
    Tests for #12422 -- Django smarts (#2472/#11212) with charset of utf-8 text
    parts shouldn't pollute global email Python package charset registry when
    django.mail.message is imported: raw stdlib MIMEText parts must keep
    using base64 for the utf-8 charset.
    """
    # assertIn replaces the weaker assertTrue(x in y) idiom throughout.
    def test_utf8(self):
        txt = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
    def test_7bit(self):
        txt = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
    def test_8bit_latin(self):
        txt = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
    def test_8bit_non_latin(self):
        txt = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin, object):
    """
    Test cases shared by every concrete email backend.  Subclasses set
    ``email_backend`` to a dotted backend path and implement
    ``get_mailbox_content()`` (and usually ``flush_mailbox()``).
    """
    email_backend = None
    def setUp(self):
        # Route all mail sent during a test through the backend under test.
        self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
        self.settings_override.enable()
    def tearDown(self):
        self.settings_override.disable()
    def assertStartsWith(self, first, second):
        """Assert that ``first`` begins with ``second``, with a readable diff."""
        if not first.startswith(second):
            self.longMessage = True
            self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
    def get_mailbox_content(self):
        """Return the list of messages captured by the backend under test."""
        raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
    def flush_mailbox(self):
        """Discard any messages captured so far."""
        raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
    def get_the_message(self):
        """Return the single captured message, failing unless exactly one exists."""
        mailbox = self.get_mailbox_content()
        self.assertEqual(len(mailbox), 1,
            "Expected exactly one message, got %d.\n%r" % (len(mailbox), [
                m.as_string() for m in mailbox]))
        return mailbox[0]
    def test_send(self):
        """A single message is delivered with headers and body intact."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email])
        self.assertEqual(num_sent, 1)
        message = self.get_the_message()
        self.assertEqual(message["subject"], "Subject")
        self.assertEqual(message.get_payload(), "Content")
        self.assertEqual(message["from"], "from@example.com")
        self.assertEqual(message.get_all("to"), ["to@example.com"])
    def test_send_unicode(self):
        """Non-ASCII subjects are RFC 2047 encoded; bodies survive a round trip."""
        email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email])
        self.assertEqual(num_sent, 1)
        message = self.get_the_message()
        self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
        self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
    def test_send_many(self):
        """Several messages sent in one send_messages() call are all delivered."""
        email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
        email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email1, email2])
        self.assertEqual(num_sent, 2)
        messages = self.get_mailbox_content()
        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[0].get_payload(), "Content1")
        self.assertEqual(messages[1].get_payload(), "Content2")
    def test_send_verbose_name(self):
        """A non-ASCII display name in From is RFC 2047 encoded."""
        email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
            ["to@example.com"])
        email.send()
        message = self.get_the_message()
        self.assertEqual(message["subject"], "Subject")
        self.assertEqual(message.get_payload(), "Content")
        self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
    def test_plaintext_send_mail(self):
        """
        Test send_mail without the html_message
        regression test for adding html_message parameter to send_mail()
        """
        send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'])
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message.get_content_type(), 'text/plain')
    def test_html_send_mail(self):
        """Test html_message argument to send_mail"""
        send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'], html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        # Text and HTML bodies produce a two-part multipart/alternative message.
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
    def test_html_mail_managers(self):
        """Test html_message argument to mail_managers"""
        mail_managers('Subject', 'Content', html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(ADMINS=[('nobody', 'nobody@example.com')])
    def test_html_mail_admins(self):
        """Test html_message argument to mail_admins """
        mail_admins('Subject', 'Content', html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(
        ADMINS=[('nobody', 'nobody+admin@example.com')],
        MANAGERS=[('nobody', 'nobody+manager@example.com')])
    def test_manager_and_admin_mail_prefix(self):
        """
        String prefix + lazy translated subject = bad output
        Regression for #13494
        """
        mail_managers(ugettext_lazy('Subject'), 'Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.flush_mailbox()
        mail_admins(ugettext_lazy('Subject'), 'Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
    @override_settings(ADMINS=(), MANAGERS=())
    def test_empty_admins(self):
        """
        Test that mail_admins/mail_managers doesn't connect to the mail server
        if there are no recipients (#9383)
        """
        mail_admins('hi', 'there')
        self.assertEqual(self.get_mailbox_content(), [])
        mail_managers('hi', 'there')
        self.assertEqual(self.get_mailbox_content(), [])
    def test_message_cc_header(self):
        """
        Regression test for #7722 -- the Cc header must be preserved.
        """
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
        mail.get_connection().send_messages([email])
        message = self.get_the_message()
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com'),
            ('Cc', 'cc@example.com')})
        self.assertIn('\nDate: ', message.as_string())
    def test_idn_send(self):
        """
        Regression test for #14301 -- IDN domains are converted to punycode.
        """
        self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
        self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
        self.flush_mailbox()
        m = EmailMessage('Subject', 'Content', 'from@öäü.com',
            ['to@öäü.com'], cc=['cc@öäü.com'])
        m.send()
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
        self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
        self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
    def test_recipient_without_domain(self):
        """
        Regression test for #15042 -- bare (domainless) addresses are allowed.
        """
        self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), "tester")
        self.assertEqual(message.get('to'), "django")
    def test_close_connection(self):
        """
        Test that connection can be closed (even when not explicitly opened)
        """
        conn = mail.get_connection(username='', password='')
        try:
            conn.close()
        except Exception as e:
            self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Tests for the in-memory (locmem) backend, which appends to mail.outbox."""
    email_backend = 'django.core.mail.backends.locmem.EmailBackend'
    def get_mailbox_content(self):
        return [m.message() for m in mail.outbox]
    def flush_mailbox(self):
        mail.outbox = []
    def tearDown(self):
        super(LocmemBackendTests, self).tearDown()
        mail.outbox = []
    def test_locmem_shared_messages(self):
        """
        Make sure that the locmem backend populates the outbox.
        """
        connection = locmem.EmailBackend()
        connection2 = locmem.EmailBackend()
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        connection.send_messages([email])
        connection2.send_messages([email])
        # Both backend instances write to the single module-level outbox.
        self.assertEqual(len(mail.outbox), 2)
    def test_validate_multiline_headers(self):
        # Ticket #18861 - Validate emails when using the locmem backend
        with self.assertRaises(BadHeaderError):
            send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Tests for the file-based backend, which writes one file per connection."""
    email_backend = 'django.core.mail.backends.filebased.EmailBackend'
    def setUp(self):
        super(FileBackendTests, self).setUp()
        # Write messages into a throw-away directory removed after the test.
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
        self._settings_override.enable()
    def tearDown(self):
        self._settings_override.disable()
        super(FileBackendTests, self).tearDown()
    def flush_mailbox(self):
        for filename in os.listdir(self.tmp_dir):
            os.unlink(os.path.join(self.tmp_dir, filename))
    def get_mailbox_content(self):
        """Parse every message from every file the backend wrote."""
        messages = []
        for filename in os.listdir(self.tmp_dir):
            with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
                # Messages within one file are separated by a line of 79 dashes.
                session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
            messages.extend(message_from_bytes(m) for m in session if m)
        return messages
    def test_file_sessions(self):
        """Make sure opening a connection creates a new file"""
        msg = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        connection = mail.get_connection()
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
        with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
            message = message_from_binary_file(fp)
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@example.com')
        self.assertEqual(message.get('to'), 'to@example.com')
        # A second connection gets its own file ...
        connection2 = mail.get_connection()
        connection2.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        # ... while re-using the first connection appends to its existing file.
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        msg.connection = mail.get_connection()
        self.assertTrue(connection.open())
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Tests for the console backend, which writes messages to stdout."""
    email_backend = 'django.core.mail.backends.console.EmailBackend'
    def setUp(self):
        super(ConsoleBackendTests, self).setUp()
        # Capture stdout so the backend's output can be inspected.
        self.__stdout = sys.stdout
        self.stream = sys.stdout = StringIO()
    def tearDown(self):
        del self.stream
        sys.stdout = self.__stdout
        del self.__stdout
        super(ConsoleBackendTests, self).tearDown()
    def flush_mailbox(self):
        self.stream = sys.stdout = StringIO()
    def get_mailbox_content(self):
        # Messages are separated on the stream by a line of 79 dashes.
        messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
        return [message_from_bytes(force_bytes(m)) for m in messages if m]
    def test_console_stream_kwarg(self):
        """
        Test that the console backend can be pointed at an arbitrary stream.
        """
        s = StringIO()
        connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com')})
        self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
    """SMTP channel that tolerates undecodable bytes (e.g. TLS handshakes)."""
    def collect_incoming_data(self, data):
        try:
            super(FakeSMTPChannel, self).collect_incoming_data(data)
        except UnicodeDecodeError:
            # ignore decode error in SSL/TLS connection tests as we only care
            # whether the connection attempt was made
            pass
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
    """
    Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
    http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
    """
    channel_class = FakeSMTPChannel
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self)
        smtpd.SMTPServer.__init__(self, *args, **kwargs)
        self._sink = []  # received messages; guarded by sink_lock
        self.active = False
        self.active_lock = threading.Lock()
        self.sink_lock = threading.Lock()
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Store a received message after checking the envelope sender."""
        if PY3:
            # smtpd hands us text on Python 3; re-encode for message_from_bytes.
            data = data.encode('utf-8')
        m = message_from_bytes(data)
        maddr = parseaddr(m.get('from'))[1]
        if mailfrom != maddr:
            # Returning a string makes smtpd report it as an SMTP error reply.
            return "553 '%s' != '%s'" % (mailfrom, maddr)
        with self.sink_lock:
            self._sink.append(m)
    def get_sink(self):
        # Return a copy so callers can iterate without holding the lock.
        with self.sink_lock:
            return self._sink[:]
    def flush_sink(self):
        with self.sink_lock:
            self._sink[:] = []
    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() has actually entered its loop.
        self.__flag.wait()
    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            with self.active_lock:
                asyncore.loop(timeout=0.1, count=1)
        asyncore.close_all()
    def stop(self):
        if self.active:
            self.active = False
            self.join()
class SMTPBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Tests for the SMTP backend, run against an in-process fake SMTP server."""
    email_backend = 'django.core.mail.backends.smtp.EmailBackend'
    @classmethod
    def setUpClass(cls):
        # Port 0 lets the OS pick a free port; point EMAIL_PORT at it.
        cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
        cls._settings_override = override_settings(
            EMAIL_HOST="127.0.0.1",
            EMAIL_PORT=cls.server.socket.getsockname()[1])
        cls._settings_override.enable()
        cls.server.start()
    @classmethod
    def tearDownClass(cls):
        cls._settings_override.disable()
        cls.server.stop()
    def setUp(self):
        super(SMTPBackendTests, self).setUp()
        self.server.flush_sink()
    def tearDown(self):
        self.server.flush_sink()
        super(SMTPBackendTests, self).tearDown()
    def flush_mailbox(self):
        self.server.flush_sink()
    def get_mailbox_content(self):
        return self.server.get_sink()
    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_use_settings(self):
        """Credentials default to the EMAIL_HOST_USER/PASSWORD settings."""
        backend = smtp.EmailBackend()
        self.assertEqual(backend.username, 'not empty username')
        self.assertEqual(backend.password, 'not empty password')
    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_override_settings(self):
        """Explicit constructor arguments beat the settings."""
        backend = smtp.EmailBackend(username='username', password='password')
        self.assertEqual(backend.username, 'username')
        self.assertEqual(backend.password, 'password')
    @override_settings(
        EMAIL_HOST_USER="not empty username",
        EMAIL_HOST_PASSWORD="not empty password")
    def test_email_disabled_authentication(self):
        """Passing empty strings disables authentication despite the settings."""
        backend = smtp.EmailBackend(username='', password='')
        self.assertEqual(backend.username, '')
        self.assertEqual(backend.password, '')
    def test_auth_attempted(self):
        """
        Test that opening the backend with non empty username/password tries
        to authenticate against the SMTP server.
        """
        backend = smtp.EmailBackend(
            username='not empty username', password='not empty password')
        try:
            self.assertRaisesMessage(SMTPException,
                'SMTP AUTH extension not supported by server.', backend.open)
        finally:
            backend.close()
    def test_server_open(self):
        """
        Test that open() tells us whether it opened a connection.
        """
        backend = smtp.EmailBackend(username='', password='')
        self.assertFalse(backend.connection)
        opened = backend.open()
        backend.close()
        self.assertTrue(opened)
    def test_server_stopped(self):
        """
        Test that closing the backend while the SMTP server is stopped doesn't
        raise an exception.
        """
        backend = smtp.EmailBackend(username='', password='')
        backend.open()
        self.server.stop()
        try:
            backend.close()
        except Exception as e:
            self.fail("close() unexpectedly raised an exception: %s" % e)
    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_tls)
    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_override_settings(self):
        backend = smtp.EmailBackend(use_tls=False)
        self.assertFalse(backend.use_tls)
    def test_email_tls_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertFalse(backend.use_tls)
    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_use_settings(self):
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_ssl)
    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_override_settings(self):
        backend = smtp.EmailBackend(use_ssl=False)
        self.assertFalse(backend.use_ssl)
    def test_email_ssl_default_disabled(self):
        backend = smtp.EmailBackend()
        self.assertFalse(backend.use_ssl)
    @override_settings(EMAIL_USE_TLS=True)
    def test_email_tls_attempts_starttls(self):
        # The fake server does not advertise STARTTLS, so open() must fail loudly.
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_tls)
        try:
            self.assertRaisesMessage(SMTPException,
                'STARTTLS extension not supported by server.', backend.open)
        finally:
            backend.close()
    @override_settings(EMAIL_USE_SSL=True)
    def test_email_ssl_attempts_ssl_connection(self):
        # The fake server speaks plain SMTP, so an SSL handshake must fail.
        backend = smtp.EmailBackend()
        self.assertTrue(backend.use_ssl)
        try:
            self.assertRaises(SSLError, backend.open)
        finally:
            backend.close()
    def test_connection_timeout_default(self):
        """Test that the connection's timeout value is None by default."""
        connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
        self.assertEqual(connection.timeout, None)
    def test_connection_timeout_custom(self):
        """Test that the timeout parameter can be customized."""
        class MyEmailBackend(smtp.EmailBackend):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('timeout', 42)
                super(MyEmailBackend, self).__init__(*args, **kwargs)
        myemailbackend = MyEmailBackend()
        myemailbackend.open()
        self.assertEqual(myemailbackend.timeout, 42)
        self.assertEqual(myemailbackend.connection.timeout, 42)
        myemailbackend.close()
| {
"content_hash": "2ec9e28c2141c41ae610a18c7453bc12",
"timestamp": "",
"source": "github",
"line_count": 985,
"max_line_length": 216,
"avg_line_length": 45.79593908629442,
"alnum_prop": 0.6352390875435058,
"repo_name": "deployed/django",
"id": "935508aa167318ba35230263bae9aa9958ba1212",
"size": "45199",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/mail/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52958"
},
{
"name": "JavaScript",
"bytes": "102315"
},
{
"name": "Python",
"bytes": "9508205"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import owscapable
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily so pytest is only required when tests actually run.
        import pytest
        sys.exit(pytest.main(self.test_args))
# Read the long description and requirements with context managers so the
# file handles are closed deterministically (the bare open().read() pattern
# leaked them).
with open('README.txt') as readme_file:
    readme = readme_file.read()
with open('requirements.txt') as reqs_file:
    reqs = [line.strip() for line in reqs_file]
# Use the version_info tuple rather than comparing sys.version as a string:
# lexicographic string comparison breaks for multi-digit versions.
if sys.version_info < (2, 7):
    with open('requirements-2.6.txt') as extra_reqs_file:
        reqs += [line.strip() for line in extra_reqs_file]
setup(name='OwsCapable',
    version=owscapable.__version__,
    description='OGC Web Service Parsing Utility (based on OWSLib)',
    long_description=readme,
    license='BSD',
    keywords='gis ogc iso 19115 fgdc dif ows wfs wms sos csw wps wcs capabilities metadata wmts',
    author='Sean Gillies',
    author_email='sean.gillies@gmail.com',
    maintainer='Soren Scott',
    maintainer_email='sorenscott@gmail.com',
    url='http://github.io/bcube/OwsCapable',
    install_requires=reqs,
    cmdclass={'test': PyTest},
    packages=find_packages(exclude=["docs", "etc", "examples", "tests"]),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: GIS',
    ]
)
| {
"content_hash": "a12a7349d259e30e1a17a41087bf3bfd",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 99,
"avg_line_length": 33.851063829787236,
"alnum_prop": 0.6373350094280327,
"repo_name": "b-cube/OwsCapable",
"id": "c49b385db0f48151380fe50a9a4dcaa6a04d3044",
"size": "1591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "553283"
}
],
"symlink_target": ""
} |
from django import template
from testpro.forecast.weather import Weather
from testpro.forecast.models import Forecast
from django.core.cache import cache
from django.conf import settings
register = template.Library()
def get_condision():
    """Inclusion-tag context for the current weather conditions.

    Returns the Weather object from the Django cache when present (2-minute
    TTL), otherwise rebuilds it from the Forecast record with id=1 and caches
    it.  The returned dict also reports whether the cache was hit.
    """
    f = Forecast.objects.get(id=1)
    is_cached = "false"
    ws = cache.get('ws')
    # `is None` (identity) instead of `== None` -- comparing with == can be
    # hijacked by __eq__ and is non-idiomatic.
    if ws is None:
        ws = Weather(f.partner_id, f.key, f.location_id)
        cache.set('ws', ws, 120)
    else:
        is_cached = "true"
    # Plain attribute access instead of calling settings.__getattr__ directly.
    return {'rt': ws.rt, 'cached': is_cached, 'ws': ws, 'media_url': settings.MEDIA_URL}
register.inclusion_tag('forecast/forecast_condision.html')(get_condision)
| {
"content_hash": "678d2c9c5a8055e0992523d856d16da8",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 103,
"avg_line_length": 29.045454545454547,
"alnum_prop": 0.6885758998435054,
"repo_name": "blampe/M2M",
"id": "9867c1cbede9c25152b6082b005affb2b9cd53a2",
"size": "639",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "m2m/forecast/templatetags/forecast_tags.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "754736"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "JavaScript",
"bytes": "21268"
},
{
"name": "PHP",
"bytes": "18"
},
{
"name": "Python",
"bytes": "6374305"
},
{
"name": "Shell",
"bytes": "4721"
}
],
"symlink_target": ""
} |
import itertools
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import case
from sqlalchemy import text
from time import time, localtime, strftime, mktime, strptime, gmtime
from distutils.version import LooseVersion
from . import app
db = SQLAlchemy(app)
class Guilty(db.Model):
    """A (function, module) pair blamed for a crash, with moderator metadata."""
    __tablename__ = 'guilty'
    __table_args__ = (
        db.UniqueConstraint('function', 'module', name='unique_guilty_constraint'),
    )
    id = db.Column(db.Integer, primary_key=True)
    function = db.Column(db.String)
    module = db.Column(db.String)
    comment = db.Column(db.String)    # free-form moderator note
    hide = db.Column(db.Boolean, default=False)    # hidden from listings when True
    def __init__(self, func, mod):
        self.function = func
        self.module = mod
    @staticmethod
    def update_comment(guilty_id, comment):
        """Set the moderator comment for the guilty row with the given id."""
        guilty = Guilty.query.filter_by(id=guilty_id).first()
        guilty.comment = comment
        db.session.commit()
    @staticmethod
    def get_function(guilty_id):
        """Return the function name for the row, or "" if missing/empty."""
        guilty = Guilty.query.filter_by(id=guilty_id).first()
        # Explicit guard instead of the fragile `cond and a or b` idiom.
        if guilty and guilty.function:
            return guilty.function
        return ""
    @staticmethod
    def get_module(guilty_id):
        """Return the module name for the row, or "" if missing/empty."""
        guilty = Guilty.query.filter_by(id=guilty_id).first()
        if guilty and guilty.module:
            return guilty.module
        return ""
    @staticmethod
    def update_hidden(guilty_id, status):
        """Set the hidden flag for the guilty row with the given id."""
        guilty = Guilty.query.filter_by(id=guilty_id).first()
        guilty.hide = status
        db.session.commit()
    @staticmethod
    def get_hidden_value(guilty_id):
        """Return the hidden flag for the row (False when the row is missing)."""
        guilty = Guilty.query.filter_by(id=guilty_id).first()
        return bool(guilty and guilty.hide)
    @staticmethod
    def get_hidden_guilties():
        """Return (id, function, module) tuples of all hidden rows, by function."""
        q = db.session.query(Guilty.id, Guilty.function, Guilty.module)
        q = q.filter(Guilty.hide == True)
        q = q.order_by(Guilty.function)
        return q.all()
class Record(db.Model):
    """A single telemetry record as received from a client probe."""
    __tablename__ = 'records'
    id = db.Column(db.Integer, primary_key=True)
    architecture = db.Column(db.Text)
    bios_version = db.Column(db.Text, default='')
    board_name = db.Column(db.Text, default='')
    build = db.Column(db.Text, nullable=False)
    classification = db.Column(db.Text, nullable=False)
    cpu_model = db.Column(db.Text, default='')
    event_id = db.Column(db.Text, default='')
    external = db.Column(db.Boolean, default=False)
    host_type = db.Column(db.Text, default='')
    # NOTE(review): integer default on a Text column -- presumably meant
    # default=''; confirm before changing.
    kernel_version = db.Column(db.Text, default=0)
    machine_id = db.Column(db.Text, default='')
    payload_version = db.Column(db.Integer)
    record_version = db.Column(db.Integer, default=0)
    severity = db.Column(db.Integer)
    system_name = db.Column(db.Text)
    timestamp_client = db.Column(db.Numeric)    # when the client captured it
    timestamp_server = db.Column(db.Numeric, nullable=False)    # when we received it
    payload = db.Column(db.Text, nullable=False)
    processed = db.Column(db.Boolean, default=False)
    guilty_id = db.Column(db.Integer, db.ForeignKey('guilty.id'))
    # A previous `guilty = db.Column(db.Text, default='')` line was dead code:
    # it was immediately overwritten by this relationship, so it was removed.
    guilty = db.relationship('Guilty', backref=db.backref('records', lazy='dynamic'), lazy='joined')
def __init__(self, machine_id, host_type, severity, classification, build, architecture, kernel_version,
             record_version, ts_capture, ts_reception, payload_version, system_name,
             board_name, bios_version, cpu_model, event_id, external, payload):
    """Populate a new Record from raw client/server values.

    Note the renames: ``ts_capture`` is stored as ``timestamp_client`` and
    ``ts_reception`` as ``timestamp_server``; all other parameters map to
    the identically-named columns.
    """
    self.machine_id = machine_id
    self.host_type = host_type
    self.architecture = architecture
    self.classification = classification
    self.build = build
    self.kernel_version = kernel_version
    self.record_version = record_version
    self.severity = severity
    self.timestamp_client = ts_capture
    self.timestamp_server = ts_reception
    self.payload_version = payload_version
    self.system_name = system_name
    self.external = external
    self.board_name = board_name
    self.bios_version = bios_version
    self.cpu_model = cpu_model
    self.event_id = event_id
    self.payload = payload
def __repr__(self):
    # Debug-friendly summary; the client capture timestamp is rendered in
    # the server's local time.
    return "<Record(id='{}', class='{}', build='{}', created='{}')>".format(self.id, self.classification, self.build, strftime("%a, %d %b %Y %H:%M:%S", localtime(self.timestamp_client)))
def __str__(self):
    # Full dictionary form (see to_dict) rendered as a string.
    return str(self.to_dict())
def to_dict(self):
    """Return the record as a plain dict (API/JSON-friendly view).

    Timestamps are rendered as UTC strings; every other field is passed
    through unchanged.
    """
    return {
        'id': self.id,
        'machine_id': self.machine_id,
        'machine_type': self.host_type,
        'arch': self.architecture,
        'build': self.build,
        'kernel_version': self.kernel_version,
        'ts_capture': strftime('%Y-%m-%d %H:%M:%S UTC', gmtime(self.timestamp_client)),
        'ts_reception': strftime('%Y-%m-%d %H:%M:%S UTC', gmtime(self.timestamp_server)),
        'severity': self.severity,
        'classification': self.classification,
        'record_version': self.record_version,
        'payload': self.payload,
        'board_name': self.board_name,
        'bios_version': self.bios_version,
        'cpu_model': self.cpu_model,
        'event_id': self.event_id,
        'external': self.external,
    }
# for the exported CSV rows
def to_list(self):
    """Return the subset of fields used for CSV export, in column order."""
    return [
        self.id,
        self.external,
        self.timestamp_server,
        self.severity,
        self.classification,
        self.build,
        self.machine_id,
        self.payload,
    ]
@staticmethod
def list():
    """Return every record in the table (unfiltered; can be very large)."""
    return Record.query.all()
@staticmethod
def create(machine_id, host_type, severity, classification, build, architecture, kernel_version,
           record_version, ts_capture, ts_reception, payload_version, system_name,
           board_name, bios_version, cpu_model, event_id, external, payload):
    """Insert and commit a new Record; return the persisted instance.

    Rolls back the session and re-raises on any failure.
    """
    try:
        record = Record(machine_id, host_type, severity, classification, build, architecture, kernel_version,
                        record_version, ts_capture, ts_reception, payload_version, system_name,
                        board_name, bios_version, cpu_model, event_id, external, payload)
        db.session.add(record)
        db.session.commit()
        return record
    except:
        # Deliberately broad: guarantee the rollback runs before re-raising,
        # whatever went wrong during flush/commit.
        db.session.rollback()
        raise
@staticmethod
def query_records(build, classification, severity, machine_id,
                  data_source=None, limit=None, payload=None,
                  not_payload=None, from_id=None):
    """Build a Record query from the given optional filters.

    Returns a query ordered newest-first (by id, descending), optionally
    limited.  ``payload``/``not_payload`` are PostgreSQL regexes matched
    (or negated) against the payload body.
    """
    q = Record.query
    if build is not None:
        q = q.filter_by(build=build)
    if classification is not None:
        q = q.filter_by(classification=classification)
    if severity is not None:
        q = q.filter(Record.severity == severity)
    if machine_id is not None:
        q = q.filter(Record.machine_id == machine_id)
    if payload is not None:
        q = q.filter(Record.payload.op('~')(payload))
    if not_payload is not None:
        q = q.filter(~Record.payload.op('~')(not_payload))
    if data_source == "external":
        q = q.filter(Record.external == True)
    elif data_source == "internal":
        q = q.filter(Record.external == False)
    if from_id is not None:
        q = q.filter(Record.id < from_id)
    q = q.order_by(Record.id.desc())
    if limit is not None:
        q = q.limit(limit)
    return q
@staticmethod
def get_record(record_id):
    """Fetch a single record by primary key, or None when absent."""
    return Record.query.filter_by(id=record_id).first()
@staticmethod
def filter_records(build, classification, severity, machine_id=None, system_name=None, limit=None, from_date=None,
                   to_date=None, payload=None, not_payload=None, data_source=None, lastid=None):
    """Build a Record query from the given optional filters, newest first.

    ``classification`` may be a list (exact membership) or a scalar SQL
    LIKE pattern.  Dates are 'YYYY-MM-DD' strings parsed as local time;
    the from bound is inclusive, the to bound exclusive.
    """
    q = Record.query
    if build is not None:
        q = q.filter_by(build=build)
    if classification is not None:
        if isinstance(classification, list):
            q = q.filter(Record.classification.in_(classification))
        else:
            q = q.filter(Record.classification.like(classification))
    if severity is not None:
        q = q.filter(Record.severity == severity)
    if system_name is not None:
        q = q.filter(Record.system_name == system_name)
    if machine_id is not None:
        q = q.filter(Record.machine_id == machine_id)
    if from_date is not None:
        q = q.filter(Record.timestamp_client >= mktime(strptime(from_date, "%Y-%m-%d")))
    if to_date is not None:
        q = q.filter(Record.timestamp_client < mktime(strptime(to_date, "%Y-%m-%d")))
    if payload is not None:
        q = q.filter(Record.payload.op('~')(payload))
    if not_payload is not None:
        q = q.filter(~Record.payload.op('~')(not_payload))
    if data_source == "external":
        q = q.filter(Record.external == True)
    elif data_source == "internal":
        q = q.filter(Record.external == False)
    if lastid is not None:
        q = q.filter(Record.id > int(lastid))
    q = q.order_by(Record.id.desc())
    if limit is not None:
        q = q.limit(limit)
    return q
@staticmethod
def delete_records():
    """Purge old records according to app configuration.

    Two passes: per-field rules from PURGE_FILTERED_RECORDS (each configured
    name has its own max age in days), then a catch-all pass deleting
    everything *not* covered by a rule that is older than
    MAX_DAYS_KEEP_UNFILTERED_RECORDS.  Rolls back on any failure.

    Fix: each candidate query was executed twice (once for the ``if q.all()``
    guard and once to collect ids); it is now evaluated a single time.
    """
    MAX_DAYS_KEEP_UNFILTERED_RECORDS = app.config.get("MAX_DAYS_KEEP_UNFILTERED_RECORDS", 35)
    PURGE_FILTERED_RECORDS = app.config.get("PURGE_FILTERED_RECORDS", {})
    try:
        def purge_field(field):
            # Delete records matching each configured name in this field
            # once they exceed their configured age (in days).
            for name, days in PURGE_FILTERED_RECORDS[field].items():
                if not days:
                    continue
                age = time() - days * 24 * 60 * 60
                q = db.session.query(Record)
                if field == 'classification':
                    # classification rules may use '*' wildcards
                    q = q.filter(Record.classification.like(name.replace("*", "%")))
                else:
                    q = q.filter(getattr(Record, field) == name)
                q = q.filter(Record.timestamp_server < age)
                rows = q.all()  # evaluate once (was executed twice)
                if rows:
                    count = db.session.query(Record).filter(Record.id.in_([x.id for x in rows])).delete(synchronize_session=False)
                    print("Deleted {} {} records".format(count, name))
        for field in PURGE_FILTERED_RECORDS.keys():
            purge_field(field)
        if MAX_DAYS_KEEP_UNFILTERED_RECORDS:
            unfiltered_age = time() - MAX_DAYS_KEEP_UNFILTERED_RECORDS * 24 * 60 * 60
            q = db.session.query(Record.id)
            # Exclude everything already covered by a field-specific rule.
            for field in PURGE_FILTERED_RECORDS.keys():
                if field == 'classification':
                    for classification in PURGE_FILTERED_RECORDS[field].keys():
                        q = q.filter(~Record.classification.like(classification.replace("*", "%")))
                else:
                    for name in PURGE_FILTERED_RECORDS[field].keys():
                        q = q.filter(getattr(Record, field) != name)
            q = q.filter(Record.timestamp_server < unfiltered_age)
            rows = q.all()  # evaluate once (was executed twice)
            if rows:
                count = db.session.query(Record).filter(Record.id.in_([x.id for x in rows])).delete(synchronize_session=False)
                print("Deleted {} old records".format(count))
        db.session.commit()
    except Exception as e:
        app.logger.error("Record purging failed")
        app.logger.error(e)
        db.session.rollback()
@staticmethod
def get_recordcnts_by_build():
    """(build, record_count) per numeric build, ordered by build number."""
    q = db.session.query(Record.build, db.func.count(Record.id))
    q = q.filter(Record.build.op('~')('^[0-9]+$'))
    return q.group_by(Record.build).order_by(cast(Record.build, db.Integer)).all()
@staticmethod
def get_builds():
    """Distinct numeric builds, newest version first."""
    builds = (db.session.query(Record.build).distinct()
              .filter(Record.build.op('~')('^[0-9]+$'))
              .order_by(Record.build)
              .all())
    return sorted(builds, key=lambda x: LooseVersion(x[0]), reverse=True)
@staticmethod
def get_recordcnts_by_classification():
    """(classification, total) pairs, most frequent first."""
    q = db.session.query(Record.classification, db.func.count(Record.id).label('total'))
    return q.group_by(Record.classification).order_by(desc('total')).all()
@staticmethod
def expand_class(D):
A, B, C = D
return ["{}/*".format(A), "{}/{}/*".format(A, B), "{}/{}/{}".format(A, B, C)]
@staticmethod
def get_classifications(with_regex=False):
    """Distinct classifications; optionally expanded into wildcard patterns.

    With *with_regex*, each 'A/B/C' value is expanded via expand_class and
    the union is returned sorted and de-duplicated.
    """
    q = db.session.query(Record.classification).distinct()
    if not with_regex:
        return q.all()
    expanded = (Record.expand_class(c[0].split('/')) for c in q.all())
    return sorted(set(itertools.chain(*expanded)))
@staticmethod
def get_os_map():
    """Map each system_name to the list of builds seen for it."""
    rows = db.session.query(Record.system_name, Record.build).order_by(Record.system_name).group_by(Record.system_name, Record.build).all()
    os_map = {}
    for name, build in rows:
        os_map.setdefault(name, []).append(build)
    return os_map
@staticmethod
def get_recordcnts_by_machine_type():
    """(host_type, total) pairs, most frequent first."""
    q = db.session.query(Record.host_type, db.func.count(Record.id).label('total'))
    return q.group_by(Record.host_type).order_by(desc('total')).all()
@staticmethod
def get_recordcnts_by_severity():
    """(severity, count) pairs grouped by severity."""
    return db.session.query(Record.severity, db.func.count(Record.id)).group_by(Record.severity).all()
@staticmethod
def get_crashcnts_by_class(classes=None):
    """(classification, count) for crash records.

    Restricted to *classes* when given, otherwise all
    'org.clearlinux/crash/%' classifications.
    """
    q = db.session.query(Record.classification, db.func.count(Record.id))
    if classes:
        q = q.filter(Record.classification.in_(classes))
    else:
        q = q.filter(Record.classification.like('org.clearlinux/crash/%'))
    return q.group_by(Record.classification).all()
@staticmethod
def get_crashcnts_by_build(classes=None):
    """(build, crash_count) for the 10 newest numeric builds."""
    if not classes:
        classes = ['org.clearlinux/crash/clr']
    q = db.session.query(Record.build, db.func.count(Record.id))
    q = q.filter(Record.classification.in_(classes))
    q = q.filter(Record.build.op('~')('^[0-9]+$'))
    q = q.group_by(Record.build)
    q = q.order_by(desc(cast(Record.build, db.Integer)))
    return q.limit(10).all()
@staticmethod
def get_top_crash_guilties(classes=None):
    """Top guilty functions per build for recent crash records.

    Returns (function, module, build, total, guilty_id, comment) rows,
    newest build first, highest count first within a build.
    """
    q = db.session.query(Guilty.function, Guilty.module, Record.build, db.func.count(Record.id).label('total'), Guilty.id, Guilty.comment)
    q = q.join(Record)
    if not classes:
        classes = ['org.clearlinux/crash/clr']
    q = q.filter(Record.classification.in_(classes))
    # numeric builds only (two or more digits), capped to drop bogus values
    q = q.filter(Record.build.op('~')('^[0-9][0-9]+$'))
    q = q.filter(cast(Record.build, db.Integer) <= 100000)
    q = q.filter(Guilty.hide == False)
    q = q.group_by(Guilty.function, Guilty.module, Guilty.comment, Guilty.id, Record.build)
    q = q.order_by(desc(cast(Record.build, db.Integer)), desc('total'))
    # Restrict to the 8 newest builds seen so far.  NOTE: this executes the
    # partially-built query once just to discover those builds.
    q = q.filter(Record.build.in_(sorted(tuple(set([x[2] for x in q.all()])), key=lambda x: int(x))[-8:]))
    # query for records created in the last week (~ 10 Clear builds)
    interval_sec = 24 * 60 * 60 * 7
    current_time = time()
    sec_in_past = current_time - interval_sec
    q = q.filter(Record.timestamp_client > sec_in_past)
    return q.all()
@staticmethod
def get_new_crash_records(classes=None, id=None):
    """Unprocessed Clear Linux crash records (optionally a single id)."""
    q = db.session.query(Record)
    if not classes:
        classes = ['org.clearlinux/crash/clr']
    q = q.filter(Record.classification.in_(classes))
    q = q.filter(Record.system_name == 'clear-linux-os')
    q = q.filter(Record.processed == False)
    if id:
        q = q.filter(Record.id == id)
    return q.all()
@staticmethod
def set_processed_flag(record):
    # In-memory only; callers must commit (see commit_guilty_changes).
    record.processed = True
@staticmethod
def get_guilty_for_funcmod(func, mod):
    # Existing Guilty row for this (function, module) pair, or None.
    q = db.session.query(Guilty).filter_by(function=func, module=mod).first()
    return q
@staticmethod
def get_guilty_id_for_record(record_id):
    # Guilty id reached by joining through the record, or None.
    q = db.session.query(Guilty.id).join(Record)
    q = q.filter(Record.id == record_id)
    return q.first()
@staticmethod
def init_guilty(func, mod):
    # Construct (but do not persist) a new Guilty row.
    return Guilty(func, mod)
@staticmethod
def create_guilty_for_record(record, guilty):
    # Attach via the relationship; persisted on the next commit.
    record.guilty = guilty
@staticmethod
def commit_guilty_changes():
    # just commit for now
    db.session.commit()
@staticmethod
def get_crash_backtraces(classes=None, guilty_id=None, machine_id=None, build=None, most_recent=None, record_id=None):
    """(payload, id) tuples for crash records matching the given filters.

    When *record_id* is given every other filter is ignored and the single
    matching row (or None) is returned instead of a list.
    """
    q = db.session.query(Record.payload, Record.id)
    if record_id:
        # Short circuit if we know the record ID
        return q.filter(Record.id == record_id).first()
    if build:
        q = q.filter(Record.build == build)
    if not classes:
        classes = ['org.clearlinux/crash/clr']
    q = q.filter(Record.classification.in_(classes))
    q = q.filter(Record.system_name == 'clear-linux-os')
    if guilty_id:
        q = q.filter(Record.guilty_id == guilty_id)
    if machine_id:
        q = q.filter(Record.machine_id == machine_id)
    if most_recent:
        # only records captured within the last *most_recent* days
        cutoff = time() - 24 * 60 * 60 * int(most_recent)
        q = q.filter(Record.timestamp_client > cutoff)
    return q.all()
@staticmethod
def reset_processed_records(classes=None, id=None):
    """Clear the processed flag so matching crash records are re-processed."""
    if not classes:
        classes = ['org.clearlinux/crash/clr']
    q = db.session.query(Record)
    q = q.filter(Record.classification.in_(classes))
    q = q.filter(Record.system_name == 'clear-linux-os')
    if id:
        q = q.filter(Record.id == id)
    for record in q.all():
        record.processed = False
    db.session.commit()
@staticmethod
def get_machine_ids_for_guilty(id, most_recent=None):
    """(build, machine_id, total, guilty_id) rows for one guilty function,
    newest build first, highest count first within a build."""
    q = db.session.query(Record.build, Record.machine_id, db.func.count(Record.id).label('total'), Record.guilty_id)
    q = q.filter(Record.guilty_id == id)
    q = q.filter(Record.system_name == 'clear-linux-os')
    q = q.filter(Record.build.op('~')('^[0-9][0-9]+$'))
    q = q.group_by(Record.build, Record.machine_id, Record.guilty_id)
    q = q.order_by(desc(cast(Record.build, db.Integer)), desc('total'))
    if most_recent:
        # restrict to records captured in the last *most_recent* days
        cutoff = time() - 24 * 60 * 60 * int(most_recent)
        q = q.filter(Record.timestamp_client > cutoff)
    return q.all()
@staticmethod
def get_update_msgs():
    """Payloads of swupd update records captured within the last 7 days.

    Fix (naming/comments only): the original labeled this window "2 weeks"
    (``sec_2_weeks``) but the value is 7 days; the window itself is kept
    unchanged and is now named and documented correctly.
    """
    q = db.session.query(Record.payload)
    q = q.filter(Record.classification == "org.clearlinux/swupd-client/update")
    week_in_sec = 24 * 60 * 60 * 7
    cutoff = time() - week_in_sec
    # query for records created in the last week
    q = q.filter(Record.timestamp_client > cutoff)
    return q.all()
@staticmethod
def get_swupd_msgs(most_recent=None):
    """Query of (timestamp_client, machine_id, payload) for swupd-client
    records, newest first; optionally limited to the last *most_recent*
    days.  Returns the query object, not a materialized list."""
    q = db.session.query(Record.timestamp_client, Record.machine_id, Record.payload)
    q = q.filter(Record.classification.like('org.clearlinux/swupd-client/%'))
    if most_recent:
        cutoff = time() - 24 * 60 * 60 * int(most_recent)
        q = q.filter(Record.timestamp_client > cutoff)
    return q.order_by(desc(Record.timestamp_client))
@staticmethod
def get_heartbeat_msgs(most_recent=None):
    """Per-build counts of distinct internal and external machines that
    sent a heartbeat ping, optionally limited to the last *most_recent*
    days.  Returns (build, internal_count, external_count) rows ordered
    by numeric build."""
    # These two expressions are SQL CASE conditional expressions, later
    # used within count(distinct ...) aggregates for the query.
    internal_expr = case([(Record.external == False, Record.machine_id), ]).label('internal_count')
    external_expr = case([(Record.external == True, Record.machine_id), ]).label('external_count')
    q = db.session.query(Record.build, db.func.count(db.distinct(internal_expr)), db.func.count(db.distinct(external_expr)))
    q = q.filter(Record.classification == "org.clearlinux/heartbeat/ping")
    q = q.filter(Record.system_name == 'clear-linux-os')
    q = q.group_by(Record.build)
    if most_recent:
        interval_sec = 24 * 60 * 60 * int(most_recent)
        current_time = time()
        sec_in_past = current_time - interval_sec
        q = q.filter(Record.timestamp_client > sec_in_past)
    q = q.order_by(cast(Record.build, db.Integer))
    return q.all()
@staticmethod
def get_latest_timestamp_server():
    """Return timestamp_server of the newest record (highest id).

    NOTE(review): raises TypeError when the table is empty (first() is
    None) — presumably only called once records exist; confirm callers.
    """
    sql = text("SELECT timestamp_server FROM records WHERE id = (SELECT MAX(id) FROM records)")
    result = db.engine.execute(sql)
    return result.first()[0]
class GuiltyBlacklist(db.Model):
    """Guilty (function, module) pairs excluded from crash triage."""
    __tablename__ = 'guilty_blacklisted'
    id = db.Column(db.Integer, primary_key=True)
    function = db.Column(db.String)
    module = db.Column(db.String)
    def __init__(self, func, mod):
        self.function = func
        self.module = mod
    def __repr__(self):
        return "<GuiltyBlacklist(id='{}', guilty='{}:{}')>".format(self.id, self.function, self.module)
    def __str__(self):
        return str(self.to_dict())
    def to_dict(self):
        """Return the entry as a plain dict."""
        guilty = {
            'function': self.function,
            'module': self.module
        }
        return guilty
    @staticmethod
    def add(func, mod):
        """Insert and commit a new blacklist entry; rolls back and
        re-raises on failure."""
        try:
            g = GuiltyBlacklist(func, mod)
            db.session.add(g)
            db.session.commit()
            return g
        except:
            db.session.rollback()
            raise
    @staticmethod
    def remove(func, mod):
        """Delete the first entry matching (func, mod) and commit.

        NOTE(review): raises when no such entry exists (delete(None));
        callers appear to check exists() first — confirm.
        """
        q = db.session.query(GuiltyBlacklist)
        q = q.filter_by(function=func, module=mod)
        entry = q.first()
        db.session.delete(entry)
        db.session.commit()
    @staticmethod
    def get_guilties():
        """All (function, module) pairs, ordered by function name."""
        q = db.session.query(GuiltyBlacklist.function, GuiltyBlacklist.module)
        q = q.order_by(GuiltyBlacklist.function)
        return q.all()
    @staticmethod
    def exists(func, mod):
        """True if (func, mod) is blacklisted.

        Fix: previously fetched *every* matching row and used the fragile
        ``len(...) != 0 and True or False`` idiom; a single first() lookup
        yields the same boolean without loading all rows.
        """
        q = db.session.query(GuiltyBlacklist.function, GuiltyBlacklist.module)
        q = q.filter_by(function=func, module=mod)
        return q.first() is not None
    @staticmethod
    def update(to_add, to_remove):
        """Add and remove (func, mod) pairs, committing once at the end;
        rolls back and re-raises on failure."""
        try:
            for i in to_add:
                if not GuiltyBlacklist.exists(i[0], i[1]):
                    g = GuiltyBlacklist(i[0], i[1])
                    db.session.add(g)
            for i in to_remove:
                if GuiltyBlacklist.exists(i[0], i[1]):
                    q = db.session.query(GuiltyBlacklist)
                    q = q.filter_by(function=i[0], module=i[1])
                    entry = q.first()
                    db.session.delete(entry)
            db.session.commit()
        except:
            db.session.rollback()
            raise
# vi: ts=4 et sw=4 sts=4
| {
"content_hash": "0aace72ad0c3b82c498be8c1dffb389c",
"timestamp": "",
"source": "github",
"line_count": 614,
"max_line_length": 190,
"avg_line_length": 39.229641693811075,
"alnum_prop": 0.5870386515547806,
"repo_name": "alexjch/telemetrics-backend",
"id": "88eec7a80402e7c7731e7d1aeeae7ee0454c83f9",
"size": "24170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/shared_utils/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1791"
},
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "60204"
},
{
"name": "JavaScript",
"bytes": "16170"
},
{
"name": "Makefile",
"bytes": "1074"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "151603"
},
{
"name": "Shell",
"bytes": "36843"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Single-source the release version here.
VERSION = '1.2.12'

# Package metadata for the BitEx distribution; tests run via nose.
setup(name='BitEx', version=VERSION, author='Nils Diefenbach',
      author_email='23okrs20+pypi@mykolab.com',
      url="https://github.com/nlsdfnbch/bitex.git",
      test_suite='nose.collector', tests_require=['nose'],
      packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'travis']),
      install_requires=['requests', 'websocket-client', 'autobahn', 'pusherclient'],
      description='Python3-based API Framework for Crypto Exchanges',
      license='MIT', classifiers=['Development Status :: 4 - Beta',
                                  'Intended Audience :: Developers'],
      )
"content_hash": "913a141eb8171b72c833a1a6ecc2eb89",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 44.2,
"alnum_prop": 0.6485671191553545,
"repo_name": "nlsdfnbch/bitex-crawler",
"id": "99ca51c9d546ffe1e5a495587e6ac8c5ed7f8a24",
"size": "663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33916"
}
],
"symlink_target": ""
} |
""" BVT tests for Service offerings"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import isAlmostEqual
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class Services:
    """Test Service offerings Services

    Static test data consumed by the test cases below; the nested dicts are
    handed directly to the marvin base-class factories.
    """
    def __init__(self):
        self.services = {
            "account": {
                "email": "test@test.com",
                "firstname": "Test",
                "lastname": "User",
                "username": "test",
                # Random characters are appended in create account to
                # ensure unique username generated each time
                "password": "password",
            },
            # offering created/updated/deleted by the tests themselves
            "off":
                {
                    "name": "Service Offering",
                    "displaytext": "Service Offering",
                    "cpunumber": 1,
                    "cpuspeed": 100,  # MHz
                    "memory": 128,  # in MBs
                },
            "small":
                # Create a small virtual machine instance with disk offering
                {
                    "displayname": "testserver",
                    "username": "root",  # VM creds for SSH
                    "password": "password",
                    "ssh_port": 22,
                    "hypervisor": 'XenServer',
                    "privateport": 22,
                    "publicport": 22,
                    "protocol": 'TCP',
                },
            "medium":  # Create a medium virtual machine instance
                {
                    "displayname": "testserver",
                    "username": "root",
                    "password": "password",
                    "ssh_port": 22,
                    "hypervisor": 'XenServer',
                    "privateport": 22,
                    "publicport": 22,
                    "protocol": 'TCP',
                },
            "service_offerings":
                {
                    "tiny":
                        {
                            "name": "Tiny Instance",
                            "displaytext": "Tiny Instance",
                            "cpunumber": 1,
                            "cpuspeed": 100,  # in MHz
                            "memory": 128,  # In MBs
                        },
                    "small":
                        {
                            # Small service offering ID to for change VM
                            # service offering from medium to small
                            "name": "Small Instance",
                            "displaytext": "Small Instance",
                            "cpunumber": 1,
                            "cpuspeed": 100,
                            "memory": 128,
                        },
                    "medium":
                        {
                            # Medium service offering ID to for
                            # change VM service offering from small to medium
                            "name": "Medium Instance",
                            "displaytext": "Medium Instance",
                            "cpunumber": 1,
                            "cpuspeed": 100,
                            "memory": 256,
                        }
                },
            "ostype": 'CentOS 5.3 (64-bit)',
        }
class TestCreateServiceOffering(cloudstackTestCase):
    """Smoke test for the createServiceOffering API."""
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        self.services = Services().services
    def tearDown(self):
        try:
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"])
    def test_01_create_service_offering(self):
        """Test to create service offering"""
        # Validate the following:
        # 1. createServiceOfferings should return a valid information for newly created offering
        # 2. The Cloud Database contains the valid information
        service_offering = ServiceOffering.create(
            self.apiclient,
            self.services["off"]
        )
        self.cleanup.append(service_offering)
        self.debug("Created service offering with ID: %s" % service_offering.id)
        list_service_response = list_service_offering(
            self.apiclient,
            id=service_offering.id
        )
        self.assertEqual(
            isinstance(list_service_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_service_response),
            0,
            "Check Service offering is created"
        )
        # Fix: this local was assigned but never used — the assertions kept
        # re-indexing the list.  Use it consistently instead.
        service_response = list_service_response[0]
        self.assertEqual(
            service_response.cpunumber,
            self.services["off"]["cpunumber"],
            "Check server id in createServiceOffering"
        )
        self.assertEqual(
            service_response.cpuspeed,
            self.services["off"]["cpuspeed"],
            "Check cpuspeed in createServiceOffering"
        )
        self.assertEqual(
            service_response.displaytext,
            self.services["off"]["displaytext"],
            "Check server displaytext in createServiceOfferings"
        )
        self.assertEqual(
            service_response.memory,
            self.services["off"]["memory"],
            "Check memory in createServiceOffering"
        )
        self.assertEqual(
            service_response.name,
            self.services["off"]["name"],
            "Check name in createServiceOffering"
        )
        return
class TestServiceOfferings(cloudstackTestCase):
    """Update, delete and change-service-offering tests."""
    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
    def tearDown(self):
        try:
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
@classmethod
def setUpClass(cls):
    """One-time fixtures: zone/template lookup, service offerings, an
    account and a running medium VM shared by all tests in the class."""
    cls.api_client = super(TestServiceOfferings, cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    domain = get_domain(cls.api_client, cls.services)
    cls.zone = get_zone(cls.api_client, cls.services)
    cls.services['mode'] = cls.zone.networktype
    # NOTE(review): service_offering_1/2 are never added to _cleanup.
    # Offering 2 is deleted by test_03, but offering 1 appears to be leaked
    # after the run — confirm whether this is intended.
    cls.service_offering_1 = ServiceOffering.create(
        cls.api_client,
        cls.services["off"]
    )
    cls.service_offering_2 = ServiceOffering.create(
        cls.api_client,
        cls.services["off"]
    )
    template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    # Set Zones and disk offerings
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["small"]["template"] = template.id
    cls.services["medium"]["zoneid"] = cls.zone.id
    cls.services["medium"]["template"] = template.id
    # Create VMs, NAT Rules etc
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=domain.id
    )
    cls.small_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offerings"]["small"]
    )
    cls.medium_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offerings"]["medium"]
    )
    cls.medium_virtual_machine = VirtualMachine.create(
        cls.api_client,
        cls.services["medium"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.medium_offering.id,
        mode=cls.services["mode"]
    )
    cls._cleanup = [
        cls.small_offering,
        cls.medium_offering,
        cls.account
    ]
    return
@classmethod
def tearDownClass(cls):
    """Remove the class-level fixtures created in setUpClass."""
    try:
        cls.api_client = super(TestServiceOfferings, cls).getClsTestClient().getApiClient()
        # Clean up, terminate the created templates
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"])
def test_02_edit_service_offering(self):
    """Test to update existing service offering"""
    # Validate the following:
    # 1. updateServiceOffering should return
    #    a valid information for newly created offering
    # Generate new name & displaytext from random data
    new_displaytext = random_gen()
    new_name = random_gen()
    self.debug("Updating service offering with ID: %s" %
               self.service_offering_1.id)
    cmd = updateServiceOffering.updateServiceOfferingCmd()
    # Add parameters for API call
    cmd.id = self.service_offering_1.id
    cmd.displaytext = new_displaytext
    cmd.name = new_name
    self.apiclient.updateServiceOffering(cmd)
    offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_1.id
    )
    self.assertEqual(
        isinstance(offerings, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(offerings),
        0,
        "Check Service offering is updated"
    )
    self.assertEqual(
        offerings[0].displaytext,
        new_displaytext,
        "Check server displaytext in updateServiceOffering"
    )
    self.assertEqual(
        offerings[0].name,
        new_name,
        "Check server name in updateServiceOffering"
    )
    return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"])
def test_03_delete_service_offering(self):
    """Test to delete service offering"""
    # Validate the following:
    # 1. deleteServiceOffering should return
    #    a valid information for newly created offering
    self.debug("Deleting service offering with ID: %s" %
               self.service_offering_2.id)
    self.service_offering_2.delete(self.apiclient)
    offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_2.id
    )
    self.assertEqual(
        offerings,
        None,
        "Check if service offering exists in listDiskOfferings"
    )
    return
@attr(tags=["advanced", "advancedns", "smoke"])
def test_04_change_offering_small(self):
    """Test to change service to a small capacity
    """
    # Validate the following
    # 1. Log in to the Vm .We should see that the CPU and memory Info of
    #    this Vm matches the one specified for "Small" service offering.
    # 2. Using listVM command verify that this Vm
    #    has Small service offering Id.
    self.debug("Stopping VM - ID: %s" % self.medium_virtual_machine.id)
    self.medium_virtual_machine.stop(self.apiclient)
    # Ensure that VM is in stopped state
    list_vm_response = list_virtual_machines(
        self.apiclient,
        id=self.medium_virtual_machine.id
    )
    if isinstance(list_vm_response, list):
        vm = list_vm_response[0]
        if vm.state == 'Stopped':
            self.debug("VM state: %s" % vm.state)
        else:
            raise Exception(
                "Failed to stop VM (ID: %s) in change service offering" % vm.id)
    self.debug("Change Service offering VM - ID: %s" %
               self.medium_virtual_machine.id)
    cmd = changeServiceForVirtualMachine.changeServiceForVirtualMachineCmd()
    cmd.id = self.medium_virtual_machine.id
    cmd.serviceofferingid = self.small_offering.id
    self.apiclient.changeServiceForVirtualMachine(cmd)
    self.debug("Starting VM - ID: %s" % self.medium_virtual_machine.id)
    self.medium_virtual_machine.start(self.apiclient)
    # Ensure that VM is in running state
    list_vm_response = list_virtual_machines(
        self.apiclient,
        id=self.medium_virtual_machine.id
    )
    if isinstance(list_vm_response, list):
        vm = list_vm_response[0]
        if vm.state == 'Running':
            self.debug("VM state: %s" % vm.state)
        else:
            raise Exception(
                "Failed to start VM (ID: %s) after changing service offering" % vm.id)
    try:
        ssh = self.medium_virtual_machine.get_ssh_client()
    except Exception as e:
        self.fail(
            "SSH Access failed for %s: %s" %\
            (self.medium_virtual_machine.ipaddress, e)
        )
    # Read the guest's actual CPU/memory from /proc and compare against
    # the "small" offering, allowing a tolerance on the memory figure.
    cpuinfo = ssh.execute("cat /proc/cpuinfo")
    cpu_cnt = len([i for i in cpuinfo if "processor" in i])
    #'cpu MHz\t\t: 2660.499'
    cpu_speed = [i for i in cpuinfo if "cpu MHz" in i][0].split()[3]
    meminfo = ssh.execute("cat /proc/meminfo")
    #MemTotal:        1017464 kB
    total_mem = [i for i in meminfo if "MemTotal" in i][0].split()[1]
    self.debug(
        "CPU count: %s, CPU Speed: %s, Mem Info: %s" % (
            cpu_cnt,
            cpu_speed,
            total_mem
        ))
    self.assertAlmostEqual(
        int(cpu_cnt),
        self.small_offering.cpunumber,
        "Check CPU Count for small offering"
    )
    self.assertAlmostEqual(
        list_vm_response[0].cpuspeed,
        self.small_offering.cpuspeed,
        "Check CPU Speed for small offering"
    )
    self.assertTrue(
        isAlmostEqual(int(int(total_mem) / 1024),
                      int(self.small_offering.memory),
                      range=20
                      ),
        "Check Memory(kb) for small offering"
    )
    return
| {
"content_hash": "275c5ec62e1b4493c996bf18deae3b6a",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 96,
"avg_line_length": 35.82588235294118,
"alnum_prop": 0.5113621437015631,
"repo_name": "mufaddalq/cloudstack-datera-driver",
"id": "22273d766e47dc1fdd135bc311ce608713e6aabd",
"size": "16013",
"binary": false,
"copies": "1",
"ref": "refs/heads/4.2",
"path": "test/integration/smoke/test_service_offerings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "250"
},
{
"name": "Batchfile",
"bytes": "6317"
},
{
"name": "CSS",
"bytes": "302008"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "HTML",
"bytes": "38671"
},
{
"name": "Java",
"bytes": "79758943"
},
{
"name": "JavaScript",
"bytes": "4237188"
},
{
"name": "Perl",
"bytes": "1879"
},
{
"name": "Python",
"bytes": "5187499"
},
{
"name": "Shell",
"bytes": "803262"
}
],
"symlink_target": ""
} |
from piprng import PiPrng
class TickATackGame(object):
    """Tic-tac-toe game state driven by a pi-digit pseudo-random generator."""
    EMPTY = ' '            # marker for an unoccupied cell
    PLAYERS = ['P', 'Q']   # PLAYERS[moves % 2] is the side to move
    DEBUG = False          # set True for verbose score/state tracing
    def __init__(self, player_start=0):
        # Marks for each side; assigned properly in init_game().
        self.player_char = self.EMPTY
        self.computer_char = self.EMPTY
        self.prng = PiPrng()
        # Second PRNG stream for the human player, constructed with
        # player_start (presumably a stream offset — see PiPrng).
        self.player_prng = PiPrng(player_start)
        self.scores = {'player': 0, 'computer': 0}
def init_game(self, replay, player_char):
    """Reset the per-game state.

    Scores are zeroed unless *replay* is True; *player_char* chooses which
    mark ('P' or 'Q') the human plays, the computer gets the other one.
    """
    if self.DEBUG:
        print("init_game - scores: p:{0},c:{1}, replay:{2}".format(self.scores['player'], self.scores['computer'], replay))
    if not replay:
        if self.DEBUG:
            print("Set scores to 0 from p:{0},c:{1}".format(self.scores['player'], self.scores['computer']))
        self.scores['player'] = 0
        self.scores['computer'] = 0
    if player_char == self.PLAYERS[0]:
        self.player_char, self.computer_char = self.PLAYERS[0], self.PLAYERS[1]
    else:
        self.player_char, self.computer_char = self.PLAYERS[1], self.PLAYERS[0]
    self.moves = 0
    self.winner = self.EMPTY
    self.top = [self.EMPTY] * 3
    self.middle = [self.EMPTY] * 3
    self.bottom = [self.EMPTY] * 3
def set_board_content(self):
    # Render the 3x3 board (with row/column indices) into self.game_board.
    self.game_board = " 0 1 2 \n" \
        " +---+---+---+\n" \
        " 0 | {0} | {1} | {2} |\n" \
        " +---+---+---+\n" \
        " 1 | {3} | {4} | {5} |\n" \
        " +---+---+---+\n" \
        " 2 | {6} | {7} | {8} |\n" \
        " +---+---+---+\n".format(
            self.top[0], self.top[1], self.top[2],
            self.middle[0], self.middle[1], self.middle[2],
            self.bottom[0], self.bottom[1], self.bottom[2])
def get_board_content(self):
    # Re-render and return the current board as printable text.
    self.set_board_content()
    return self.game_board
def has_valid_coords(self, move_coords):
    """True when 'row' and 'col' both address a cell on the 3x3 board.

    Rewritten with chained comparisons — the idiomatic, equivalent form of
    the original pairwise bound checks.
    """
    return 0 <= move_coords['row'] <= 2 and 0 <= move_coords['col'] <= 2
def coords_unoccupied(self, move_coords):
    # True when the addressed cell still holds the EMPTY marker.
    return (self.game_board_get_at_coord(move_coords) == self.EMPTY)
def game_board_set_at_coord(self, move_coords):
    """Write the mark of the side to move into the addressed cell.

    The side is derived from the move counter: PLAYERS[moves % 2].
    """
    if move_coords['row'] == 0:
        row = self.top
    elif move_coords['row'] == 1:
        row = self.middle
    else:
        row = self.bottom
    row[move_coords['col']] = self.PLAYERS[self.moves % 2]
def game_board_get_at_coord(self, move_coords):
    """Return the mark currently stored in the addressed cell."""
    if move_coords['row'] == 0:
        row = self.top
    elif move_coords['row'] == 1:
        row = self.middle
    else:
        row = self.bottom
    return row[move_coords['col']]
def is_valid_move(self, move_coords):
return (self.has_valid_coords(move_coords) and self.coords_unoccupied(move_coords))
def update_board(self, move_coords):
result = 0
if self.is_valid_move(move_coords):
self.game_board_set_at_coord(move_coords)
else:
result = 1
return result
def have_winner(self):
result = 1
if self.moves < 3:
return 0
# top row
if self.top[0] == self.top[1] and \
self.top[1] == self.top[2] and \
self.top[0] != self.EMPTY:
self.winner = self.top[0]
# middle row
elif self.middle[0] == self.middle[1] and \
self.middle[1] == self.middle[2] and \
self.middle[0] != self.EMPTY:
self.winner = self.middle[0]
# bottom row
elif self.bottom[0] == self.bottom[1] and \
self.bottom[1] == self.bottom[2] and \
self.bottom[0] != self.EMPTY:
self.winner = self.bottom[0]
# first col
elif self.top[0] == self.middle[0] and \
self.middle[0] == self.bottom[0] and \
self.bottom[0] != self.EMPTY:
self.winner = self.top[0]
# second col
elif self.top[1] == self.middle[1] and \
self.middle[1] == self.bottom[1] and \
self.bottom[1] != self.EMPTY:
self.winner = self.top[1]
# third col
elif self.top[2] == self.middle[2] and \
self.middle[2] == self.bottom[2] and \
self.bottom[2] != self.EMPTY:
self.winner = self.top[2]
# diag top left to bottom right
elif self.top[0] == self.middle[1] and \
self.middle[1] == self.bottom[2] and \
self.bottom[2] != self.EMPTY:
self.winner = self.top[0]
# diag bottom left to top right
elif self.top[2] == self.middle[1] and \
self.middle[1] == self.bottom[0] and \
self.bottom[0] != self.EMPTY:
self.winner = self.top[2]
# no winner
else:
result = 0
return result
def have_moves_remaining(self):
return (self.moves < 9)
def is_player_winner(self):
return (self.winner == self.player_char)
def is_players_turn(self):
return (self.player_char == self.PLAYERS[self.moves % 2])
def move_complete(self):
self.moves += 1
def random_move(self):
return {'row': self.prng.get_next() % 3,
'col': self.prng.get_next() % 3}
def computer_valid_random_move(self):
c_move = self.random_move()
while(not self.is_valid_move(c_move)):
c_move = self.random_move()
return c_move
def player_random_move(self):
return {'row': self.player_prng.get_next() % 3,
'col': self.player_prng.get_next() % 3}
def player_valid_random_move(self):
p_move = self.player_random_move()
while(not self.is_valid_move(p_move)):
p_move = self.player_random_move()
return p_move
def get_scores(self):
return self.scores
def update_score(self):
if self.winner == self.player_char:
self.scores['player'] += 1
else:
self.scores['computer'] += 1
| {
"content_hash": "b54abfa2efe374dedb43531675f12f0a",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 118,
"avg_line_length": 27.989637305699482,
"alnum_prop": 0.6047760088855979,
"repo_name": "f0rki/cb-multios",
"id": "8223e303d2e1f1472fbe6e67c4e38c67b1cc8270",
"size": "6548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "original-challenges/Tick-A-Tack/support/tickatack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6510"
},
{
"name": "C",
"bytes": "83182317"
},
{
"name": "C++",
"bytes": "2007200"
},
{
"name": "CMake",
"bytes": "4910"
},
{
"name": "GDB",
"bytes": "114"
},
{
"name": "Groff",
"bytes": "262159"
},
{
"name": "Logos",
"bytes": "2944"
},
{
"name": "Makefile",
"bytes": "9008"
},
{
"name": "Objective-C",
"bytes": "98709"
},
{
"name": "Python",
"bytes": "11805403"
},
{
"name": "Ruby",
"bytes": "4515"
},
{
"name": "Shell",
"bytes": "3779"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.